return DenormMode == DenormalMode::getIEEE();
}
+// The amdgpu-unsafe-fp-atomics attribute enables generation of unsafe
+// floating point atomic instructions. May generate more efficient code,
+// but may not respect rounding and denormal modes, and may give incorrect
+// results for certain memory destinations.
+static bool unsafeFPAtomicsDisabled(Function *F) {
+ return F->getFnAttribute("amdgpu-unsafe-fp-atomics").getValueAsString() !=
+ "true";
+}
+
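
For context: "amdgpu-unsafe-fp-atomics" is an ordinary string function attribute, so the helper above returns false only for functions that explicitly opt in. A hypothetical kernel that opts in (illustrative IR, not part of this patch):

define amdgpu_kernel void @opt_in(float addrspace(1)* %p) #0 {
  %old = atomicrmw fadd float addrspace(1)* %p, float 1.0 syncscope("agent") monotonic
  ret void
}
attributes #0 = { "amdgpu-unsafe-fp-atomics"="true" }
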
TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
unsigned AS = RMW->getPointerAddressSpace();
if (AS == AMDGPUAS::PRIVATE_ADDRESS)
return AtomicExpansionKind::NotAtomic;
+ auto SSID = RMW->getSyncScopeID();
+
auto ReportUnsafeHWInst = [&](TargetLowering::AtomicExpansionKind Kind) {
OptimizationRemarkEmitter ORE(RMW->getFunction());
LLVMContext &Ctx = RMW->getFunction()->getContext();
return Kind;
};
+ bool HasSystemScope =
+ SSID == SyncScope::System ||
+ SSID == RMW->getContext().getOrInsertSyncScopeID("one-as");
+
switch (RMW->getOperation()) {
case AtomicRMWInst::FAdd: {
Type *Ty = RMW->getType();
if (!Ty->isFloatTy() && (!Subtarget->hasGFX90AInsts() || !Ty->isDoubleTy()))
return AtomicExpansionKind::CmpXChg;
- if ((AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) &&
+ if (AMDGPU::isFlatGlobalAddrSpace(AS) &&
Subtarget->hasAtomicFaddNoRtnInsts()) {
- // The amdgpu-unsafe-fp-atomics attribute enables generation of unsafe
- // floating point atomic instructions. May generate more efficient code,
- // but may not respect rounding and denormal modes, and may give incorrect
- // results for certain memory destinations.
- if (RMW->getFunction()
- ->getFnAttribute("amdgpu-unsafe-fp-atomics")
- .getValueAsString() != "true")
+ if (unsafeFPAtomicsDisabled(RMW->getFunction()))
return AtomicExpansionKind::CmpXChg;
// Always expand system scope fp atomics.
- auto SSID = RMW->getSyncScopeID();
- if (SSID == SyncScope::System ||
- SSID == RMW->getContext().getOrInsertSyncScopeID("one-as"))
+ if (HasSystemScope)
return AtomicExpansionKind::CmpXChg;
if (AS == AMDGPUAS::GLOBAL_ADDRESS && Ty->isFloatTy()) {
return AtomicExpansionKind::CmpXChg;
}
+ case AtomicRMWInst::FMin:
+ case AtomicRMWInst::FMax:
+ case AtomicRMWInst::Min:
+ case AtomicRMWInst::Max:
+ case AtomicRMWInst::UMin:
+ case AtomicRMWInst::UMax: {
+ if (AMDGPU::isFlatGlobalAddrSpace(AS)) {
+ if (RMW->getType()->isFloatTy() &&
+ unsafeFPAtomicsDisabled(RMW->getFunction()))
+ return AtomicExpansionKind::CmpXChg;
+
+ // Always expand system scope min/max atomics.
+ if (HasSystemScope)
+ return AtomicExpansionKind::CmpXChg;
+ }
+ break;
+ }
default:
break;
}
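
Net effect of the new switch arm: integer and FP min/max atomicrmw operations on flat or global addresses keep their hardware instructions only at scopes narrower than system (and, for the FP forms, only when unsafe FP atomics are enabled); at system or one-as scope they now expand to a compare-exchange loop. A minimal contrast, in hypothetical IR mirroring the tests below:

; Expands to a cmpxchg loop: default (system) scope.
%a = atomicrmw max i32 addrspace(1)* %p, i32 1 monotonic
; Stays a hardware atomic (e.g. global_atomic_smax): workgroup scope.
%b = atomicrmw max i32 addrspace(1)* %p, i32 1 syncscope("workgroup") monotonic
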
ret void
}
-define protected amdgpu_kernel void @max(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
-; CHECK-LABEL: max:
+define protected amdgpu_kernel void @max_workgroup(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
+; CHECK-LABEL: max_workgroup:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
+ %n32 = atomicrmw max i32 addrspace(1)* %p, i32 1 syncscope("workgroup") monotonic
+ %n64 = zext i32 %n32 to i64
+ %p1 = getelementptr inbounds %S, %S addrspace(1)* %q, i64 %n64, i32 0
+ store float 1.0, float addrspace(1)* %p1
+ ret void
+}
+
+define protected amdgpu_kernel void @max(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
+; CHECK-LABEL: max:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b64 s[4:5], 0
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_load_dword s6, s[0:1], 0x0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v0, s6
+; CHECK-NEXT: .LBB7_1: ; %atomicrmw.start
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: v_mov_b32_e32 v3, v0
+; CHECK-NEXT: v_max_i32_e32 v2, 1, v3
+; CHECK-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; CHECK-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; CHECK-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; CHECK-NEXT: s_cbranch_execnz .LBB7_1
+; CHECK-NEXT: ; %bb.2: ; %atomicrmw.end
+; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT: v_mov_b32_e32 v2, s2
+; CHECK-NEXT: v_mov_b32_e32 v3, s3
+; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v0, 12, v[2:3]
+; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
+; CHECK-NEXT: global_store_dword v[0:1], v2, off
+; CHECK-NEXT: s_endpgm
%n32 = atomicrmw max i32 addrspace(1)* %p, i32 1 monotonic
%n64 = zext i32 %n32 to i64
  %p1 = getelementptr inbounds %S, %S addrspace(1)* %q, i64 %n64, i32 0
  store float 1.0, float addrspace(1)* %p1
  ret void
}
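
The loop in the CHECK lines above is the standard AtomicExpandPass rewrite of the atomicrmw; schematically, with illustrative value names rather than the pass's actual output, the system-scope max becomes:

  %init = load i32, i32 addrspace(1)* %p
  br label %atomicrmw.start
atomicrmw.start:
  %loaded = phi i32 [ %init, %entry ], [ %newloaded, %atomicrmw.start ]
  %cmp = icmp sgt i32 %loaded, 1
  %new = select i1 %cmp, i32 %loaded, i32 1
  %pair = cmpxchg i32 addrspace(1)* %p, i32 %loaded, i32 %new monotonic monotonic
  %newloaded = extractvalue { i32, i1 } %pair, 0
  %success = extractvalue { i32, i1 } %pair, 1
  br i1 %success, label %atomicrmw.end, label %atomicrmw.start
atomicrmw.end:

After instruction selection this is the v_max_i32_e32 plus global_atomic_cmpswap pair checked above; the min/umax/umin tests below differ only in the compare.
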
-define protected amdgpu_kernel void @min(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
-; CHECK-LABEL: min:
+define protected amdgpu_kernel void @min_workgroup(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
+; CHECK-LABEL: min_workgroup:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
+ %n32 = atomicrmw min i32 addrspace(1)* %p, i32 1 syncscope("workgroup") monotonic
+ %n64 = zext i32 %n32 to i64
+ %p1 = getelementptr inbounds %S, %S addrspace(1)* %q, i64 %n64, i32 0
+ store float 1.0, float addrspace(1)* %p1
+ ret void
+}
+
+define protected amdgpu_kernel void @min(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
+; CHECK-LABEL: min:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b64 s[4:5], 0
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_load_dword s6, s[0:1], 0x0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v0, s6
+; CHECK-NEXT: .LBB9_1: ; %atomicrmw.start
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: v_mov_b32_e32 v3, v0
+; CHECK-NEXT: v_min_i32_e32 v2, 1, v3
+; CHECK-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; CHECK-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; CHECK-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; CHECK-NEXT: s_cbranch_execnz .LBB9_1
+; CHECK-NEXT: ; %bb.2: ; %atomicrmw.end
+; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT: v_mov_b32_e32 v2, s2
+; CHECK-NEXT: v_mov_b32_e32 v3, s3
+; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v0, 12, v[2:3]
+; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
+; CHECK-NEXT: global_store_dword v[0:1], v2, off
+; CHECK-NEXT: s_endpgm
%n32 = atomicrmw min i32 addrspace(1)* %p, i32 1 monotonic
%n64 = zext i32 %n32 to i64
  %p1 = getelementptr inbounds %S, %S addrspace(1)* %q, i64 %n64, i32 0
  store float 1.0, float addrspace(1)* %p1
  ret void
}
-define protected amdgpu_kernel void @umax(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
-; CHECK-LABEL: umax:
+define protected amdgpu_kernel void @umax_workgroup(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
+; CHECK-LABEL: umax_workgroup:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
+ %n32 = atomicrmw umax i32 addrspace(1)* %p, i32 1 syncscope("workgroup") monotonic
+ %n64 = zext i32 %n32 to i64
+ %p1 = getelementptr inbounds %S, %S addrspace(1)* %q, i64 %n64, i32 0
+ store float 1.0, float addrspace(1)* %p1
+ ret void
+}
+
+define protected amdgpu_kernel void @umax(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
+; CHECK-LABEL: umax:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b64 s[4:5], 0
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_load_dword s6, s[0:1], 0x0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v0, s6
+; CHECK-NEXT: .LBB11_1: ; %atomicrmw.start
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: v_mov_b32_e32 v3, v0
+; CHECK-NEXT: v_max_u32_e32 v2, 1, v3
+; CHECK-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; CHECK-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; CHECK-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; CHECK-NEXT: s_cbranch_execnz .LBB11_1
+; CHECK-NEXT: ; %bb.2: ; %atomicrmw.end
+; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT: v_mov_b32_e32 v2, s2
+; CHECK-NEXT: v_mov_b32_e32 v3, s3
+; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v0, 12, v[2:3]
+; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
+; CHECK-NEXT: global_store_dword v[0:1], v2, off
+; CHECK-NEXT: s_endpgm
%n32 = atomicrmw umax i32 addrspace(1)* %p, i32 1 monotonic
%n64 = zext i32 %n32 to i64
  %p1 = getelementptr inbounds %S, %S addrspace(1)* %q, i64 %n64, i32 0
  store float 1.0, float addrspace(1)* %p1
  ret void
}
-define protected amdgpu_kernel void @umin(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
-; CHECK-LABEL: umin:
+define protected amdgpu_kernel void @umin_workgroup(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
+; CHECK-LABEL: umin_workgroup:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
+ %n32 = atomicrmw umin i32 addrspace(1)* %p, i32 1 syncscope("workgroup") monotonic
+ %n64 = zext i32 %n32 to i64
+ %p1 = getelementptr inbounds %S, %S addrspace(1)* %q, i64 %n64, i32 0
+ store float 1.0, float addrspace(1)* %p1
+ ret void
+}
+
+define protected amdgpu_kernel void @umin(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
+; CHECK-LABEL: umin:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b64 s[4:5], 0
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_load_dword s6, s[0:1], 0x0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v0, s6
+; CHECK-NEXT: .LBB13_1: ; %atomicrmw.start
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: v_mov_b32_e32 v3, v0
+; CHECK-NEXT: v_min_u32_e32 v2, 1, v3
+; CHECK-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; CHECK-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; CHECK-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; CHECK-NEXT: s_cbranch_execnz .LBB13_1
+; CHECK-NEXT: ; %bb.2: ; %atomicrmw.end
+; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT: v_mov_b32_e32 v2, s2
+; CHECK-NEXT: v_mov_b32_e32 v3, s3
+; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v0, 12, v[2:3]
+; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
+; CHECK-NEXT: global_store_dword v[0:1], v2, off
+; CHECK-NEXT: s_endpgm
%n32 = atomicrmw umin i32 addrspace(1)* %p, i32 1 monotonic
%n64 = zext i32 %n32 to i64
  %p1 = getelementptr inbounds %S, %S addrspace(1)* %q, i64 %n64, i32 0
  store float 1.0, float addrspace(1)* %p1
  ret void
}
; CHECK-NEXT: s_load_dword s6, s[0:1], 0x0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v0, s6
-; CHECK-NEXT: .LBB14_1: ; %atomicrmw.start
+; CHECK-NEXT: .LBB18_1: ; %atomicrmw.start
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: v_mov_b32_e32 v3, v0
; CHECK-NEXT: v_add_f32_e32 v2, 1.0, v3
; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; CHECK-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; CHECK-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; CHECK-NEXT: s_cbranch_execnz .LBB14_1
+; CHECK-NEXT: s_cbranch_execnz .LBB18_1
; CHECK-NEXT: ; %bb.2: ; %atomicrmw.end
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
; CHECK-NEXT: v_cvt_u32_f32_e32 v2, v0
; CHECK-NEXT: s_load_dword s6, s[0:1], 0x0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v0, s6
-; CHECK-NEXT: .LBB15_1: ; %atomicrmw.start
+; CHECK-NEXT: .LBB19_1: ; %atomicrmw.start
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: v_mov_b32_e32 v3, v0
; CHECK-NEXT: v_add_f32_e32 v2, -1.0, v3
; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; CHECK-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; CHECK-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; CHECK-NEXT: s_cbranch_execnz .LBB15_1
+; CHECK-NEXT: s_cbranch_execnz .LBB19_1
; CHECK-NEXT: ; %bb.2: ; %atomicrmw.end
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
; CHECK-NEXT: v_cvt_u32_f32_e32 v2, v0
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s4
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smax v[0:1], v2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_max_i32_offset:
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s4
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smax v[0:1], v2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
;
; GCN3-LABEL: atomic_max_i32_offset:
; GCN3-NEXT: v_mov_b32_e32 v0, s2
; GCN3-NEXT: v_mov_b32_e32 v1, s3
; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_smax v[0:1], v2 offset:16
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, i32* %out, i32 4
- %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile max i32* %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}
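
The remaining hunks in this file are the same mechanical update. Moving the tests to syncscope("workgroup") means the memory legalizer no longer treats the seq_cst atomic as device-visible: the vmcnt(0) waits and the buffer_wbinvl1_vol L1 invalidate required at system scope are dropped, leaving only the lgkmcnt(0) wait (plus, where the atomic's result feeds a store, a vmcnt(0) wait before that use). Schematically, assuming GFX7/GFX8-style caching:

; system scope (old)              ; workgroup scope (new)
s_waitcnt vmcnt(0) lgkmcnt(0)     s_waitcnt lgkmcnt(0)
flat_atomic_smax v[0:1], v2       flat_atomic_smax v[0:1], v2
s_waitcnt vmcnt(0) lgkmcnt(0)     s_waitcnt lgkmcnt(0)
buffer_wbinvl1_vol                ; (no invalidate needed)
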
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smax v2, v[0:1], v2 glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v0, s6
; GCN1-NEXT: v_mov_b32_e32 v1, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dword v[0:1], v2
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smax v2, v[0:1], v2 glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v0, s6
; GCN2-NEXT: v_mov_b32_e32 v1, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dword v[0:1], v2
; GCN2-NEXT: s_endpgm
;
; GCN3-NEXT: v_mov_b32_e32 v0, s4
; GCN3-NEXT: v_mov_b32_e32 v1, s5
; GCN3-NEXT: v_mov_b32_e32 v2, s2
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_smax v2, v[0:1], v2 offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v0, s6
; GCN3-NEXT: v_mov_b32_e32 v1, s7
+; GCN3-NEXT: s_waitcnt vmcnt(0)
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, i32* %out, i32 4
- %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile max i32* %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smax v[0:1], v2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_max_i32_addr64_offset:
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smax v[0:1], v2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
;
; GCN3-LABEL: atomic_max_i32_addr64_offset:
; GCN3-NEXT: v_mov_b32_e32 v0, s0
; GCN3-NEXT: v_mov_b32_e32 v1, s1
; GCN3-NEXT: v_mov_b32_e32 v2, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_smax v[0:1], v2 offset:16
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32* %out, i64 %index
%gep = getelementptr i32, i32* %ptr, i32 4
- %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile max i32* %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s8
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smax v2, v[0:1], v2 glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v0, s6
; GCN1-NEXT: v_mov_b32_e32 v1, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dword v[0:1], v2
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s8
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smax v2, v[0:1], v2 glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v0, s6
; GCN2-NEXT: v_mov_b32_e32 v1, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dword v[0:1], v2
; GCN2-NEXT: s_endpgm
;
; GCN3-NEXT: v_mov_b32_e32 v0, s0
; GCN3-NEXT: v_mov_b32_e32 v1, s1
; GCN3-NEXT: v_mov_b32_e32 v2, s8
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_smax v2, v[0:1], v2 offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v0, s6
; GCN3-NEXT: v_mov_b32_e32 v1, s7
+; GCN3-NEXT: s_waitcnt vmcnt(0)
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32* %out, i64 %index
%gep = getelementptr i32, i32* %ptr, i32 4
- %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile max i32* %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s2
; GCN1-NEXT: v_mov_b32_e32 v1, s3
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smax v[0:1], v2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_max_i32:
; GCN2-NEXT: v_mov_b32_e32 v0, s2
; GCN2-NEXT: v_mov_b32_e32 v1, s3
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smax v[0:1], v2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
;
; GCN3-LABEL: atomic_max_i32:
; GCN3-NEXT: v_mov_b32_e32 v0, s2
; GCN3-NEXT: v_mov_b32_e32 v1, s3
; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_smax v[0:1], v2
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile max i32* %out, i32 %in seq_cst
+ %val = atomicrmw volatile max i32* %out, i32 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s4
; GCN1-NEXT: v_mov_b32_e32 v1, s5
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smax v2, v[0:1], v2 glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v0, s6
; GCN1-NEXT: v_mov_b32_e32 v1, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dword v[0:1], v2
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v0, s4
; GCN2-NEXT: v_mov_b32_e32 v1, s5
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smax v2, v[0:1], v2 glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v0, s6
; GCN2-NEXT: v_mov_b32_e32 v1, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dword v[0:1], v2
; GCN2-NEXT: s_endpgm
;
; GCN3-NEXT: v_mov_b32_e32 v0, s4
; GCN3-NEXT: v_mov_b32_e32 v1, s5
; GCN3-NEXT: v_mov_b32_e32 v2, s2
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_smax v2, v[0:1], v2 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v0, s6
; GCN3-NEXT: v_mov_b32_e32 v1, s7
+; GCN3-NEXT: s_waitcnt vmcnt(0)
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile max i32* %out, i32 %in seq_cst
+ %val = atomicrmw volatile max i32* %out, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smax v[0:1], v2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_max_i32_addr64:
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smax v[0:1], v2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
;
; GCN3-LABEL: atomic_max_i32_addr64:
; GCN3-NEXT: v_mov_b32_e32 v0, s0
; GCN3-NEXT: v_mov_b32_e32 v1, s1
; GCN3-NEXT: v_mov_b32_e32 v2, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_smax v[0:1], v2
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32* %out, i64 %index
- %val = atomicrmw volatile max i32* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile max i32* %ptr, i32 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s8
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smax v2, v[0:1], v2 glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v0, s6
; GCN1-NEXT: v_mov_b32_e32 v1, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dword v[0:1], v2
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s8
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smax v2, v[0:1], v2 glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v0, s6
; GCN2-NEXT: v_mov_b32_e32 v1, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dword v[0:1], v2
; GCN2-NEXT: s_endpgm
;
; GCN3-NEXT: v_mov_b32_e32 v0, s0
; GCN3-NEXT: v_mov_b32_e32 v1, s1
; GCN3-NEXT: v_mov_b32_e32 v2, s8
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_smax v2, v[0:1], v2 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v0, s6
; GCN3-NEXT: v_mov_b32_e32 v1, s7
+; GCN3-NEXT: s_waitcnt vmcnt(0)
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32* %out, i64 %index
- %val = atomicrmw volatile max i32* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile max i32* %ptr, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s4
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umax v[0:1], v2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_umax_i32_offset:
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s4
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umax v[0:1], v2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
;
; GCN3-LABEL: atomic_umax_i32_offset:
; GCN3-NEXT: v_mov_b32_e32 v0, s2
; GCN3-NEXT: v_mov_b32_e32 v1, s3
; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_umax v[0:1], v2 offset:16
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, i32* %out, i32 4
- %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32* %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umax v2, v[0:1], v2 glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v0, s6
; GCN1-NEXT: v_mov_b32_e32 v1, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dword v[0:1], v2
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umax v2, v[0:1], v2 glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v0, s6
; GCN2-NEXT: v_mov_b32_e32 v1, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dword v[0:1], v2
; GCN2-NEXT: s_endpgm
;
; GCN3-NEXT: v_mov_b32_e32 v0, s4
; GCN3-NEXT: v_mov_b32_e32 v1, s5
; GCN3-NEXT: v_mov_b32_e32 v2, s2
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_umax v2, v[0:1], v2 offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v0, s6
; GCN3-NEXT: v_mov_b32_e32 v1, s7
+; GCN3-NEXT: s_waitcnt vmcnt(0)
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, i32* %out, i32 4
- %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32* %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umax v[0:1], v2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_umax_i32_addr64_offset:
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umax v[0:1], v2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
;
; GCN3-LABEL: atomic_umax_i32_addr64_offset:
; GCN3-NEXT: v_mov_b32_e32 v0, s0
; GCN3-NEXT: v_mov_b32_e32 v1, s1
; GCN3-NEXT: v_mov_b32_e32 v2, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_umax v[0:1], v2 offset:16
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32* %out, i64 %index
%gep = getelementptr i32, i32* %ptr, i32 4
- %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32* %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s8
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umax v2, v[0:1], v2 glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v0, s6
; GCN1-NEXT: v_mov_b32_e32 v1, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dword v[0:1], v2
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s8
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umax v2, v[0:1], v2 glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v0, s6
; GCN2-NEXT: v_mov_b32_e32 v1, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dword v[0:1], v2
; GCN2-NEXT: s_endpgm
;
; GCN3-NEXT: v_mov_b32_e32 v0, s0
; GCN3-NEXT: v_mov_b32_e32 v1, s1
; GCN3-NEXT: v_mov_b32_e32 v2, s8
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_umax v2, v[0:1], v2 offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v0, s6
; GCN3-NEXT: v_mov_b32_e32 v1, s7
+; GCN3-NEXT: s_waitcnt vmcnt(0)
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32* %out, i64 %index
%gep = getelementptr i32, i32* %ptr, i32 4
- %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32* %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s2
; GCN1-NEXT: v_mov_b32_e32 v1, s3
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umax v[0:1], v2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_umax_i32:
; GCN2-NEXT: v_mov_b32_e32 v0, s2
; GCN2-NEXT: v_mov_b32_e32 v1, s3
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umax v[0:1], v2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
;
; GCN3-LABEL: atomic_umax_i32:
; GCN3-NEXT: v_mov_b32_e32 v0, s2
; GCN3-NEXT: v_mov_b32_e32 v1, s3
; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_umax v[0:1], v2
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile umax i32* %out, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32* %out, i32 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s4
; GCN1-NEXT: v_mov_b32_e32 v1, s5
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umax v2, v[0:1], v2 glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v0, s6
; GCN1-NEXT: v_mov_b32_e32 v1, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dword v[0:1], v2
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v0, s4
; GCN2-NEXT: v_mov_b32_e32 v1, s5
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umax v2, v[0:1], v2 glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v0, s6
; GCN2-NEXT: v_mov_b32_e32 v1, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dword v[0:1], v2
; GCN2-NEXT: s_endpgm
;
; GCN3-NEXT: v_mov_b32_e32 v0, s4
; GCN3-NEXT: v_mov_b32_e32 v1, s5
; GCN3-NEXT: v_mov_b32_e32 v2, s2
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_umax v2, v[0:1], v2 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v0, s6
; GCN3-NEXT: v_mov_b32_e32 v1, s7
+; GCN3-NEXT: s_waitcnt vmcnt(0)
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile umax i32* %out, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32* %out, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umax v[0:1], v2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_umax_i32_addr64:
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umax v[0:1], v2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
;
; GCN3-LABEL: atomic_umax_i32_addr64:
; GCN3-NEXT: v_mov_b32_e32 v0, s0
; GCN3-NEXT: v_mov_b32_e32 v1, s1
; GCN3-NEXT: v_mov_b32_e32 v2, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_umax v[0:1], v2
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32* %out, i64 %index
- %val = atomicrmw volatile umax i32* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32* %ptr, i32 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s8
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umax v2, v[0:1], v2 glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v0, s6
; GCN1-NEXT: v_mov_b32_e32 v1, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dword v[0:1], v2
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s8
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umax v2, v[0:1], v2 glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v0, s6
; GCN2-NEXT: v_mov_b32_e32 v1, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dword v[0:1], v2
; GCN2-NEXT: s_endpgm
;
; GCN3-NEXT: v_mov_b32_e32 v0, s0
; GCN3-NEXT: v_mov_b32_e32 v1, s1
; GCN3-NEXT: v_mov_b32_e32 v2, s8
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_umax v2, v[0:1], v2 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v0, s6
; GCN3-NEXT: v_mov_b32_e32 v1, s7
+; GCN3-NEXT: s_waitcnt vmcnt(0)
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32* %out, i64 %index
- %val = atomicrmw volatile umax i32* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32* %ptr, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s4
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smin v[0:1], v2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_min_i32_offset:
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s4
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smin v[0:1], v2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
;
; GCN3-LABEL: atomic_min_i32_offset:
; GCN3-NEXT: v_mov_b32_e32 v0, s2
; GCN3-NEXT: v_mov_b32_e32 v1, s3
; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_smin v[0:1], v2 offset:16
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, i32* %out, i32 4
- %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile min i32* %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smin v2, v[0:1], v2 glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v0, s6
; GCN1-NEXT: v_mov_b32_e32 v1, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dword v[0:1], v2
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smin v2, v[0:1], v2 glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v0, s6
; GCN2-NEXT: v_mov_b32_e32 v1, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dword v[0:1], v2
; GCN2-NEXT: s_endpgm
;
; GCN3-NEXT: v_mov_b32_e32 v0, s4
; GCN3-NEXT: v_mov_b32_e32 v1, s5
; GCN3-NEXT: v_mov_b32_e32 v2, s2
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_smin v2, v[0:1], v2 offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v0, s6
; GCN3-NEXT: v_mov_b32_e32 v1, s7
+; GCN3-NEXT: s_waitcnt vmcnt(0)
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, i32* %out, i32 4
- %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile min i32* %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smin v[0:1], v2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_min_i32_addr64_offset:
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smin v[0:1], v2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
;
; GCN3-LABEL: atomic_min_i32_addr64_offset:
; GCN3-NEXT: v_mov_b32_e32 v0, s0
; GCN3-NEXT: v_mov_b32_e32 v1, s1
; GCN3-NEXT: v_mov_b32_e32 v2, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_smin v[0:1], v2 offset:16
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32* %out, i64 %index
%gep = getelementptr i32, i32* %ptr, i32 4
- %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile min i32* %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s8
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smin v2, v[0:1], v2 glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v0, s6
; GCN1-NEXT: v_mov_b32_e32 v1, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dword v[0:1], v2
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s8
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smin v2, v[0:1], v2 glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v0, s6
; GCN2-NEXT: v_mov_b32_e32 v1, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dword v[0:1], v2
; GCN2-NEXT: s_endpgm
;
; GCN3-NEXT: v_mov_b32_e32 v0, s0
; GCN3-NEXT: v_mov_b32_e32 v1, s1
; GCN3-NEXT: v_mov_b32_e32 v2, s8
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_smin v2, v[0:1], v2 offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v0, s6
; GCN3-NEXT: v_mov_b32_e32 v1, s7
+; GCN3-NEXT: s_waitcnt vmcnt(0)
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32* %out, i64 %index
%gep = getelementptr i32, i32* %ptr, i32 4
- %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile min i32* %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s2
; GCN1-NEXT: v_mov_b32_e32 v1, s3
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smin v[0:1], v2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_min_i32:
; GCN2-NEXT: v_mov_b32_e32 v0, s2
; GCN2-NEXT: v_mov_b32_e32 v1, s3
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smin v[0:1], v2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
;
; GCN3-LABEL: atomic_min_i32:
; GCN3-NEXT: v_mov_b32_e32 v0, s2
; GCN3-NEXT: v_mov_b32_e32 v1, s3
; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_smin v[0:1], v2
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile min i32* %out, i32 %in seq_cst
+ %val = atomicrmw volatile min i32* %out, i32 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s4
; GCN1-NEXT: v_mov_b32_e32 v1, s5
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smin v2, v[0:1], v2 glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v0, s6
; GCN1-NEXT: v_mov_b32_e32 v1, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dword v[0:1], v2
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v0, s4
; GCN2-NEXT: v_mov_b32_e32 v1, s5
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smin v2, v[0:1], v2 glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v0, s6
; GCN2-NEXT: v_mov_b32_e32 v1, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dword v[0:1], v2
; GCN2-NEXT: s_endpgm
;
; GCN3-NEXT: v_mov_b32_e32 v0, s4
; GCN3-NEXT: v_mov_b32_e32 v1, s5
; GCN3-NEXT: v_mov_b32_e32 v2, s2
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_smin v2, v[0:1], v2 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v0, s6
; GCN3-NEXT: v_mov_b32_e32 v1, s7
+; GCN3-NEXT: s_waitcnt vmcnt(0)
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile min i32* %out, i32 %in seq_cst
+ %val = atomicrmw volatile min i32* %out, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smin v[0:1], v2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_min_i32_addr64:
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smin v[0:1], v2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
;
; GCN3-LABEL: atomic_min_i32_addr64:
; GCN3-NEXT: v_mov_b32_e32 v0, s0
; GCN3-NEXT: v_mov_b32_e32 v1, s1
; GCN3-NEXT: v_mov_b32_e32 v2, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_smin v[0:1], v2
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32* %out, i64 %index
- %val = atomicrmw volatile min i32* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile min i32* %ptr, i32 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s8
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smin v2, v[0:1], v2 glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v0, s6
; GCN1-NEXT: v_mov_b32_e32 v1, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dword v[0:1], v2
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s8
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smin v2, v[0:1], v2 glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v0, s6
; GCN2-NEXT: v_mov_b32_e32 v1, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dword v[0:1], v2
; GCN2-NEXT: s_endpgm
;
; GCN3-NEXT: v_mov_b32_e32 v0, s0
; GCN3-NEXT: v_mov_b32_e32 v1, s1
; GCN3-NEXT: v_mov_b32_e32 v2, s8
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_smin v2, v[0:1], v2 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v0, s6
; GCN3-NEXT: v_mov_b32_e32 v1, s7
+; GCN3-NEXT: s_waitcnt vmcnt(0)
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32* %out, i64 %index
- %val = atomicrmw volatile min i32* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile min i32* %ptr, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s4
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umin v[0:1], v2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_umin_i32_offset:
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s4
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umin v[0:1], v2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
;
; GCN3-LABEL: atomic_umin_i32_offset:
; GCN3-NEXT: v_mov_b32_e32 v0, s2
; GCN3-NEXT: v_mov_b32_e32 v1, s3
; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_umin v[0:1], v2 offset:16
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, i32* %out, i32 4
- %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32* %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umin v2, v[0:1], v2 glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v0, s6
; GCN1-NEXT: v_mov_b32_e32 v1, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dword v[0:1], v2
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umin v2, v[0:1], v2 glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v0, s6
; GCN2-NEXT: v_mov_b32_e32 v1, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dword v[0:1], v2
; GCN2-NEXT: s_endpgm
;
; GCN3-NEXT: v_mov_b32_e32 v0, s4
; GCN3-NEXT: v_mov_b32_e32 v1, s5
; GCN3-NEXT: v_mov_b32_e32 v2, s2
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_umin v2, v[0:1], v2 offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v0, s6
; GCN3-NEXT: v_mov_b32_e32 v1, s7
+; GCN3-NEXT: s_waitcnt vmcnt(0)
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, i32* %out, i32 4
- %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32* %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umin v[0:1], v2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_umin_i32_addr64_offset:
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umin v[0:1], v2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
;
; GCN3-LABEL: atomic_umin_i32_addr64_offset:
; GCN3-NEXT: v_mov_b32_e32 v0, s0
; GCN3-NEXT: v_mov_b32_e32 v1, s1
; GCN3-NEXT: v_mov_b32_e32 v2, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_umin v[0:1], v2 offset:16
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32* %out, i64 %index
%gep = getelementptr i32, i32* %ptr, i32 4
- %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32* %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s8
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umin v2, v[0:1], v2 glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v0, s6
; GCN1-NEXT: v_mov_b32_e32 v1, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dword v[0:1], v2
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s8
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umin v2, v[0:1], v2 glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v0, s6
; GCN2-NEXT: v_mov_b32_e32 v1, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dword v[0:1], v2
; GCN2-NEXT: s_endpgm
;
; GCN3-NEXT: v_mov_b32_e32 v0, s0
; GCN3-NEXT: v_mov_b32_e32 v1, s1
; GCN3-NEXT: v_mov_b32_e32 v2, s8
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_umin v2, v[0:1], v2 offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v0, s6
; GCN3-NEXT: v_mov_b32_e32 v1, s7
+; GCN3-NEXT: s_waitcnt vmcnt(0)
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32* %out, i64 %index
%gep = getelementptr i32, i32* %ptr, i32 4
- %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32* %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s2
; GCN1-NEXT: v_mov_b32_e32 v1, s3
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umin v[0:1], v2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_umin_i32:
; GCN2-NEXT: v_mov_b32_e32 v0, s2
; GCN2-NEXT: v_mov_b32_e32 v1, s3
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umin v[0:1], v2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
;
; GCN3-LABEL: atomic_umin_i32:
; GCN3-NEXT: v_mov_b32_e32 v0, s2
; GCN3-NEXT: v_mov_b32_e32 v1, s3
; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_umin v[0:1], v2
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile umin i32* %out, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32* %out, i32 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s4
; GCN1-NEXT: v_mov_b32_e32 v1, s5
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umin v2, v[0:1], v2 glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v0, s6
; GCN1-NEXT: v_mov_b32_e32 v1, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dword v[0:1], v2
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v0, s4
; GCN2-NEXT: v_mov_b32_e32 v1, s5
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umin v2, v[0:1], v2 glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v0, s6
; GCN2-NEXT: v_mov_b32_e32 v1, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dword v[0:1], v2
; GCN2-NEXT: s_endpgm
;
; GCN3-NEXT: v_mov_b32_e32 v0, s4
; GCN3-NEXT: v_mov_b32_e32 v1, s5
; GCN3-NEXT: v_mov_b32_e32 v2, s2
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_umin v2, v[0:1], v2 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v0, s6
; GCN3-NEXT: v_mov_b32_e32 v1, s7
+; GCN3-NEXT: s_waitcnt vmcnt(0)
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile umin i32* %out, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32* %out, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umin v[0:1], v2
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_umin_i32_addr64:
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umin v[0:1], v2
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
;
; GCN3-LABEL: atomic_umin_i32_addr64:
; GCN3-NEXT: v_mov_b32_e32 v0, s0
; GCN3-NEXT: v_mov_b32_e32 v1, s1
; GCN3-NEXT: v_mov_b32_e32 v2, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_umin v[0:1], v2
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32* %out, i64 %index
- %val = atomicrmw volatile umin i32* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32* %ptr, i32 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s0
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s8
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umin v2, v[0:1], v2 glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v0, s6
; GCN1-NEXT: v_mov_b32_e32 v1, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dword v[0:1], v2
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v0, s0
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s8
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umin v2, v[0:1], v2 glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v0, s6
; GCN2-NEXT: v_mov_b32_e32 v1, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dword v[0:1], v2
; GCN2-NEXT: s_endpgm
;
; GCN3-NEXT: v_mov_b32_e32 v0, s0
; GCN3-NEXT: v_mov_b32_e32 v1, s1
; GCN3-NEXT: v_mov_b32_e32 v2, s8
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: flat_atomic_umin v2, v[0:1], v2 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: v_mov_b32_e32 v0, s6
; GCN3-NEXT: v_mov_b32_e32 v1, s7
+; GCN3-NEXT: s_waitcnt vmcnt(0)
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32* %out, i64 %index
- %val = atomicrmw volatile umin i32* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32* %ptr, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s2
; GCN1-NEXT: v_mov_b32_e32 v1, s3
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_max_i64_offset:
; GCN2-NEXT: v_mov_b32_e32 v0, s2
; GCN2-NEXT: v_mov_b32_e32 v1, s3
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
entry:
%gep = getelementptr i64, i64* %out, i64 4
- %tmp0 = atomicrmw volatile max i64* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile max i64* %gep, i64 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v1, s5
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v2, s2
; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v1, s5
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v2, s2
; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT: s_endpgm
entry:
%gep = getelementptr i64, i64* %out, i64 4
- %tmp0 = atomicrmw volatile max i64* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile max i64* %gep, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v1, s7
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_max_i64_addr64_offset:
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v1, s7
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64* %out, i64 %index
%gep = getelementptr i64, i64* %ptr, i64 4
- %tmp0 = atomicrmw volatile max i64* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile max i64* %gep, i64 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: s_addc_u32 s1, s1, 0
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v2, s2
; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: s_addc_u32 s1, s1, 0
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v2, s2
; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64* %out, i64 %index
%gep = getelementptr i64, i64* %ptr, i64 4
- %tmp0 = atomicrmw volatile max i64* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile max i64* %gep, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s2
; GCN1-NEXT: v_mov_b32_e32 v3, s3
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_max_i64:
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s2
; GCN2-NEXT: v_mov_b32_e32 v3, s3
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile max i64* %out, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile max i64* %out, i64 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v1, s5
; GCN1-NEXT: v_mov_b32_e32 v2, s0
; GCN1-NEXT: v_mov_b32_e32 v3, s1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smax_x2 v[0:1], v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v2, s6
; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v1, s5
; GCN2-NEXT: v_mov_b32_e32 v2, s0
; GCN2-NEXT: v_mov_b32_e32 v3, s1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smax_x2 v[0:1], v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v2, s6
; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile max i64* %out, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile max i64* %out, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v1, s7
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_max_i64_addr64:
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v1, s7
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64* %out, i64 %index
- %tmp0 = atomicrmw volatile max i64* %ptr, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile max i64* %ptr, i64 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: s_addc_u32 s1, s1, s5
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v2, s2
; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: s_addc_u32 s1, s1, s5
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v2, s2
; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64* %out, i64 %index
- %tmp0 = atomicrmw volatile max i64* %ptr, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile max i64* %ptr, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s2
; GCN1-NEXT: v_mov_b32_e32 v1, s3
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_umax_i64_offset:
; GCN2-NEXT: v_mov_b32_e32 v0, s2
; GCN2-NEXT: v_mov_b32_e32 v1, s3
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
entry:
%gep = getelementptr i64, i64* %out, i64 4
- %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v1, s5
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v2, s2
; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v1, s5
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v2, s2
; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT: s_endpgm
entry:
%gep = getelementptr i64, i64* %out, i64 4
- %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v1, s7
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_umax_i64_addr64_offset:
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v1, s7
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64* %out, i64 %index
%gep = getelementptr i64, i64* %ptr, i64 4
- %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: s_addc_u32 s1, s1, 0
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v2, s2
; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: s_addc_u32 s1, s1, 0
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v2, s2
; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64* %out, i64 %index
%gep = getelementptr i64, i64* %ptr, i64 4
- %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s2
; GCN1-NEXT: v_mov_b32_e32 v3, s3
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_umax_i64:
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s2
; GCN2-NEXT: v_mov_b32_e32 v3, s3
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile umax i64* %out, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umax i64* %out, i64 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v1, s5
; GCN1-NEXT: v_mov_b32_e32 v2, s0
; GCN1-NEXT: v_mov_b32_e32 v3, s1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umax_x2 v[0:1], v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v2, s6
; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v1, s5
; GCN2-NEXT: v_mov_b32_e32 v2, s0
; GCN2-NEXT: v_mov_b32_e32 v3, s1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umax_x2 v[0:1], v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v2, s6
; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile umax i64* %out, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umax i64* %out, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v1, s7
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_umax_i64_addr64:
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v1, s7
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64* %out, i64 %index
- %tmp0 = atomicrmw volatile umax i64* %ptr, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umax i64* %ptr, i64 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: s_addc_u32 s1, s1, s5
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v2, s2
; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: s_addc_u32 s1, s1, s5
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v2, s2
; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64* %out, i64 %index
- %tmp0 = atomicrmw volatile umax i64* %ptr, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umax i64* %ptr, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s2
; GCN1-NEXT: v_mov_b32_e32 v1, s3
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_min_i64_offset:
; GCN2-NEXT: v_mov_b32_e32 v0, s2
; GCN2-NEXT: v_mov_b32_e32 v1, s3
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
entry:
%gep = getelementptr i64, i64* %out, i64 4
- %tmp0 = atomicrmw volatile min i64* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile min i64* %gep, i64 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v1, s5
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v2, s2
; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v1, s5
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v2, s2
; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT: s_endpgm
entry:
%gep = getelementptr i64, i64* %out, i64 4
- %tmp0 = atomicrmw volatile min i64* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile min i64* %gep, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v1, s7
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_min_i64_addr64_offset:
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v1, s7
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64* %out, i64 %index
%gep = getelementptr i64, i64* %ptr, i64 4
- %tmp0 = atomicrmw volatile min i64* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile min i64* %gep, i64 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: s_addc_u32 s1, s1, 0
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v2, s2
; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: s_addc_u32 s1, s1, 0
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v2, s2
; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64* %out, i64 %index
%gep = getelementptr i64, i64* %ptr, i64 4
- %tmp0 = atomicrmw volatile min i64* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile min i64* %gep, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s2
; GCN1-NEXT: v_mov_b32_e32 v3, s3
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_min_i64:
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s2
; GCN2-NEXT: v_mov_b32_e32 v3, s3
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile min i64* %out, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile min i64* %out, i64 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v1, s5
; GCN1-NEXT: v_mov_b32_e32 v2, s0
; GCN1-NEXT: v_mov_b32_e32 v3, s1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smin_x2 v[0:1], v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v2, s6
; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v1, s5
; GCN2-NEXT: v_mov_b32_e32 v2, s0
; GCN2-NEXT: v_mov_b32_e32 v3, s1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smin_x2 v[0:1], v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v2, s6
; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile min i64* %out, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile min i64* %out, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v1, s7
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_min_i64_addr64:
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v1, s7
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64* %out, i64 %index
- %tmp0 = atomicrmw volatile min i64* %ptr, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile min i64* %ptr, i64 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: s_addc_u32 s1, s1, s5
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v2, s2
; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: s_addc_u32 s1, s1, s5
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v2, s2
; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64* %out, i64 %index
- %tmp0 = atomicrmw volatile min i64* %ptr, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile min i64* %ptr, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v0, s2
; GCN1-NEXT: v_mov_b32_e32 v1, s3
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_umin_i64_offset:
; GCN2-NEXT: v_mov_b32_e32 v0, s2
; GCN2-NEXT: v_mov_b32_e32 v1, s3
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
entry:
%gep = getelementptr i64, i64* %out, i64 4
- %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v1, s5
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v2, s2
; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v1, s5
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v2, s2
; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT: s_endpgm
entry:
%gep = getelementptr i64, i64* %out, i64 4
- %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v1, s7
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_umin_i64_addr64_offset:
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v1, s7
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64* %out, i64 %index
%gep = getelementptr i64, i64* %ptr, i64 4
- %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: s_addc_u32 s1, s1, 0
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v2, s2
; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: s_addc_u32 s1, s1, 0
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v2, s2
; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64* %out, i64 %index
%gep = getelementptr i64, i64* %ptr, i64 4
- %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v1, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s2
; GCN1-NEXT: v_mov_b32_e32 v3, s3
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_umin_i64:
; GCN2-NEXT: v_mov_b32_e32 v1, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s2
; GCN2-NEXT: v_mov_b32_e32 v3, s3
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile umin i64* %out, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umin i64* %out, i64 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v1, s5
; GCN1-NEXT: v_mov_b32_e32 v2, s0
; GCN1-NEXT: v_mov_b32_e32 v3, s1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umin_x2 v[0:1], v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v2, s6
; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: v_mov_b32_e32 v1, s5
; GCN2-NEXT: v_mov_b32_e32 v2, s0
; GCN2-NEXT: v_mov_b32_e32 v3, s1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umin_x2 v[0:1], v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v2, s6
; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile umin i64* %out, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umin i64* %out, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64* %out2
ret void
}
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v1, s7
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: s_endpgm
;
; GCN2-LABEL: atomic_umin_i64_addr64:
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v1, s7
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64* %out, i64 %index
- %tmp0 = atomicrmw volatile umin i64* %ptr, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umin i64* %ptr, i64 %in syncscope("workgroup") seq_cst
ret void
}
; GCN1-NEXT: s_addc_u32 s1, s1, s5
; GCN1-NEXT: v_mov_b32_e32 v3, s1
; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_mov_b32_e32 v2, s2
; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT: s_endpgm
;
; GCN2-NEXT: s_addc_u32 s1, s1, s5
; GCN2-NEXT: v_mov_b32_e32 v3, s1
; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_mov_b32_e32 v2, s2
; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64* %out, i64 %index
- %tmp0 = atomicrmw volatile umin i64* %ptr, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umin i64* %ptr, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64* %out2
ret void
}
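+
+; Note: with syncscope("workgroup") these tests no longer expect a
+; buffer_wbinvl1_vol cache invalidate or a vmcnt(0) wait for ordering; the
+; remaining s_waitcnt vmcnt(0) in the ret variants only covers the data
+; dependency on the atomic's returned value before the store.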
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN1 %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN2 %s
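+
+; The tests below keep the default (system) syncscope. As the autogenerated
+; checks show, system-scope 64-bit min/max atomics are not selected as a
+; single flat atomic instruction; they are expanded into a
+; flat_atomic_cmpswap_x2 loop with full vmcnt/lgkmcnt waits and a
+; buffer_wbinvl1_vol cache invalidate.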
+
+define amdgpu_kernel void @atomic_max_i64_offset(i64* %out, i64 %in) {
+; GCN1-LABEL: atomic_max_i64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: .LBB0_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB0_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: .LBB0_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB0_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64* %out, i64 4
+ %tmp0 = atomicrmw volatile max i64* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_ret_offset(i64* %out, i64* %out2, i64 %in) {
+; GCN1-LABEL: atomic_max_i64_ret_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s2, s4, 32
+; GCN1-NEXT: s_addc_u32 s3, s5, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s2
+; GCN1-NEXT: v_mov_b32_e32 v1, s3
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB1_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s1
+; GCN1-NEXT: v_mov_b32_e32 v6, s0
+; GCN1-NEXT: v_mov_b32_e32 v5, s3
+; GCN1-NEXT: v_mov_b32_e32 v4, s2
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB1_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, s6
+; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_ret_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s2, s4, 32
+; GCN2-NEXT: s_addc_u32 s3, s5, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s2
+; GCN2-NEXT: v_mov_b32_e32 v1, s3
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB1_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s1
+; GCN2-NEXT: v_mov_b32_e32 v6, s0
+; GCN2-NEXT: v_mov_b32_e32 v5, s3
+; GCN2-NEXT: v_mov_b32_e32 v4, s2
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB1_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, s6
+; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64* %out, i64 4
+ %tmp0 = atomicrmw volatile max i64* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_addr64_offset(i64* %out, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i64_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s4
+; GCN1-NEXT: s_addc_u32 s1, s1, s5
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB2_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB2_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s4
+; GCN2-NEXT: s_addc_u32 s1, s1, s5
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB2_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB2_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %gep = getelementptr i64, i64* %ptr, i64 4
+ %tmp0 = atomicrmw volatile max i64* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(i64* %out, i64* %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i64_ret_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s6
+; GCN1-NEXT: s_addc_u32 s1, s1, s7
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[6:7], 0
+; GCN1-NEXT: .LBB3_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s5
+; GCN1-NEXT: v_mov_b32_e32 v6, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_execnz .LBB3_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT: v_mov_b32_e32 v2, s2
+; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_ret_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s6
+; GCN2-NEXT: s_addc_u32 s1, s1, s7
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[6:7], 0
+; GCN2-NEXT: .LBB3_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s5
+; GCN2-NEXT: v_mov_b32_e32 v6, s4
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_execnz .LBB3_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT: v_mov_b32_e32 v2, s2
+; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %gep = getelementptr i64, i64* %ptr, i64 4
+ %tmp0 = atomicrmw volatile max i64* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64(i64* %out, i64 %in) {
+; GCN1-LABEL: atomic_max_i64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: .LBB4_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB4_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: .LBB4_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB4_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile max i64* %out, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_ret(i64* %out, i64* %out2, i64 %in) {
+; GCN1-LABEL: atomic_max_i64_ret:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s4
+; GCN1-NEXT: v_mov_b32_e32 v1, s5
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: .LBB5_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s1
+; GCN1-NEXT: v_mov_b32_e32 v6, s0
+; GCN1-NEXT: v_mov_b32_e32 v4, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s5
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB5_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v2, s6
+; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_ret:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s4
+; GCN2-NEXT: v_mov_b32_e32 v1, s5
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: .LBB5_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s1
+; GCN2-NEXT: v_mov_b32_e32 v6, s0
+; GCN2-NEXT: v_mov_b32_e32 v4, s4
+; GCN2-NEXT: v_mov_b32_e32 v5, s5
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB5_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v2, s6
+; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile max i64* %out, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_addr64(i64* %out, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i64_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s4
+; GCN1-NEXT: s_addc_u32 s1, s1, s5
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB6_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB6_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s4
+; GCN2-NEXT: s_addc_u32 s1, s1, s5
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB6_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB6_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %tmp0 = atomicrmw volatile max i64* %ptr, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_ret_addr64(i64* %out, i64* %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i64_ret_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s6
+; GCN1-NEXT: s_addc_u32 s1, s1, s7
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[6:7], 0
+; GCN1-NEXT: .LBB7_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s5
+; GCN1-NEXT: v_mov_b32_e32 v6, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_execnz .LBB7_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT: v_mov_b32_e32 v2, s2
+; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_ret_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s6
+; GCN2-NEXT: s_addc_u32 s1, s1, s7
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[6:7], 0
+; GCN2-NEXT: .LBB7_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s5
+; GCN2-NEXT: v_mov_b32_e32 v6, s4
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_execnz .LBB7_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT: v_mov_b32_e32 v2, s2
+; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %tmp0 = atomicrmw volatile max i64* %ptr, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
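+; Unsigned 64-bit max follows the same CAS-loop expansion, using the
+; unsigned compare v_cmp_lt_u64 to form the select condition.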
+define amdgpu_kernel void @atomic_umax_i64_offset(i64* %out, i64 %in) {
+; GCN1-LABEL: atomic_umax_i64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: .LBB8_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB8_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: .LBB8_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB8_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64* %out, i64 4
+ %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64_ret_offset(i64* %out, i64* %out2, i64 %in) {
+; GCN1-LABEL: atomic_umax_i64_ret_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s2, s4, 32
+; GCN1-NEXT: s_addc_u32 s3, s5, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s2
+; GCN1-NEXT: v_mov_b32_e32 v1, s3
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB9_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s1
+; GCN1-NEXT: v_mov_b32_e32 v6, s0
+; GCN1-NEXT: v_mov_b32_e32 v5, s3
+; GCN1-NEXT: v_mov_b32_e32 v4, s2
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB9_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, s6
+; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i64_ret_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s2, s4, 32
+; GCN2-NEXT: s_addc_u32 s3, s5, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s2
+; GCN2-NEXT: v_mov_b32_e32 v1, s3
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB9_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s1
+; GCN2-NEXT: v_mov_b32_e32 v6, s0
+; GCN2-NEXT: v_mov_b32_e32 v5, s3
+; GCN2-NEXT: v_mov_b32_e32 v4, s2
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB9_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, s6
+; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64* %out, i64 4
+ %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64_addr64_offset(i64* %out, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_umax_i64_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s4
+; GCN1-NEXT: s_addc_u32 s1, s1, s5
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB10_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB10_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i64_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s4
+; GCN2-NEXT: s_addc_u32 s1, s1, s5
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB10_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB10_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %gep = getelementptr i64, i64* %ptr, i64 4
+ %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(i64* %out, i64* %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_umax_i64_ret_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s6
+; GCN1-NEXT: s_addc_u32 s1, s1, s7
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[6:7], 0
+; GCN1-NEXT: .LBB11_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s5
+; GCN1-NEXT: v_mov_b32_e32 v6, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_execnz .LBB11_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT: v_mov_b32_e32 v2, s2
+; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i64_ret_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s6
+; GCN2-NEXT: s_addc_u32 s1, s1, s7
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[6:7], 0
+; GCN2-NEXT: .LBB11_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s5
+; GCN2-NEXT: v_mov_b32_e32 v6, s4
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_execnz .LBB11_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT: v_mov_b32_e32 v2, s2
+; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %gep = getelementptr i64, i64* %ptr, i64 4
+ %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64(i64* %out, i64 %in) {
+; GCN1-LABEL: atomic_umax_i64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: .LBB12_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB12_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: .LBB12_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB12_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile umax i64* %out, i64 %in seq_cst
+ ret void
+}
+
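+; The _ret variants additionally check that the value loaded by the final
+; successful cmpswap (the atomic's result) is stored to %out2 with
+; flat_store_dwordx2 after the loop exits.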
+define amdgpu_kernel void @atomic_umax_i64_ret(i64* %out, i64* %out2, i64 %in) {
+; GCN1-LABEL: atomic_umax_i64_ret:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s4
+; GCN1-NEXT: v_mov_b32_e32 v1, s5
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: .LBB13_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s1
+; GCN1-NEXT: v_mov_b32_e32 v6, s0
+; GCN1-NEXT: v_mov_b32_e32 v4, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s5
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB13_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v2, s6
+; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i64_ret:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s4
+; GCN2-NEXT: v_mov_b32_e32 v1, s5
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: .LBB13_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s1
+; GCN2-NEXT: v_mov_b32_e32 v6, s0
+; GCN2-NEXT: v_mov_b32_e32 v4, s4
+; GCN2-NEXT: v_mov_b32_e32 v5, s5
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB13_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v2, s6
+; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile umax i64* %out, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64_addr64(i64* %out, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_umax_i64_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s4
+; GCN1-NEXT: s_addc_u32 s1, s1, s5
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB14_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB14_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i64_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s4
+; GCN2-NEXT: s_addc_u32 s1, s1, s5
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB14_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB14_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %tmp0 = atomicrmw volatile umax i64* %ptr, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64_ret_addr64(i64* %out, i64* %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_umax_i64_ret_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s6
+; GCN1-NEXT: s_addc_u32 s1, s1, s7
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[6:7], 0
+; GCN1-NEXT: .LBB15_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s5
+; GCN1-NEXT: v_mov_b32_e32 v6, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_execnz .LBB15_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT: v_mov_b32_e32 v2, s2
+; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i64_ret_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s6
+; GCN2-NEXT: s_addc_u32 s1, s1, s7
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[6:7], 0
+; GCN2-NEXT: .LBB15_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s5
+; GCN2-NEXT: v_mov_b32_e32 v6, s4
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_execnz .LBB15_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT: v_mov_b32_e32 v2, s2
+; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %tmp0 = atomicrmw volatile umax i64* %ptr, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
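+; Signed 64-bit min uses the inverted compare: v_cmp_ge_i64 sets vcc when
+; the operand is >= the loaded value, so v_cndmask keeps the old value in
+; that case and writes the operand otherwise.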
+define amdgpu_kernel void @atomic_min_i64_offset(i64* %out, i64 %in) {
+; GCN1-LABEL: atomic_min_i64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: .LBB16_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB16_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_min_i64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: .LBB16_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB16_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64* %out, i64 4
+ %tmp0 = atomicrmw volatile min i64* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i64_ret_offset(i64* %out, i64* %out2, i64 %in) {
+; GCN1-LABEL: atomic_min_i64_ret_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s2, s4, 32
+; GCN1-NEXT: s_addc_u32 s3, s5, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s2
+; GCN1-NEXT: v_mov_b32_e32 v1, s3
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB17_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_ge_i64_e32 vcc, s[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s1
+; GCN1-NEXT: v_mov_b32_e32 v6, s0
+; GCN1-NEXT: v_mov_b32_e32 v5, s3
+; GCN1-NEXT: v_mov_b32_e32 v4, s2
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB17_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, s6
+; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_min_i64_ret_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s2, s4, 32
+; GCN2-NEXT: s_addc_u32 s3, s5, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s2
+; GCN2-NEXT: v_mov_b32_e32 v1, s3
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB17_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_ge_i64_e32 vcc, s[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s1
+; GCN2-NEXT: v_mov_b32_e32 v6, s0
+; GCN2-NEXT: v_mov_b32_e32 v5, s3
+; GCN2-NEXT: v_mov_b32_e32 v4, s2
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB17_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, s6
+; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64* %out, i64 4
+ %tmp0 = atomicrmw volatile min i64* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
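+; The addr64 variants scale the i64 %index by the element size of 8 bytes
+; (s_lshl_b64 by 3) and add it to the base pointer; the _offset forms then
+; fold the `gep i64 4` into a further +32 scalar add before the loop.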
+define amdgpu_kernel void @atomic_min_i64_addr64_offset(i64* %out, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_min_i64_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s4
+; GCN1-NEXT: s_addc_u32 s1, s1, s5
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB18_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB18_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_min_i64_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s4
+; GCN2-NEXT: s_addc_u32 s1, s1, s5
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB18_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB18_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %gep = getelementptr i64, i64* %ptr, i64 4
+ %tmp0 = atomicrmw volatile min i64* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(i64* %out, i64* %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_min_i64_ret_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s6
+; GCN1-NEXT: s_addc_u32 s1, s1, s7
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[6:7], 0
+; GCN1-NEXT: .LBB19_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s5
+; GCN1-NEXT: v_mov_b32_e32 v6, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_execnz .LBB19_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT: v_mov_b32_e32 v2, s2
+; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_min_i64_ret_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s6
+; GCN2-NEXT: s_addc_u32 s1, s1, s7
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[6:7], 0
+; GCN2-NEXT: .LBB19_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s5
+; GCN2-NEXT: v_mov_b32_e32 v6, s4
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_execnz .LBB19_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT: v_mov_b32_e32 v2, s2
+; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %gep = getelementptr i64, i64* %ptr, i64 4
+ %tmp0 = atomicrmw volatile min i64* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i64(i64* %out, i64 %in) {
+; GCN1-LABEL: atomic_min_i64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: .LBB20_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB20_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_min_i64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: .LBB20_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB20_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile min i64* %out, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i64_ret(i64* %out, i64* %out2, i64 %in) {
+; GCN1-LABEL: atomic_min_i64_ret:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s4
+; GCN1-NEXT: v_mov_b32_e32 v1, s5
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: .LBB21_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_ge_i64_e32 vcc, s[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s1
+; GCN1-NEXT: v_mov_b32_e32 v6, s0
+; GCN1-NEXT: v_mov_b32_e32 v4, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s5
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB21_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v2, s6
+; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_min_i64_ret:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s4
+; GCN2-NEXT: v_mov_b32_e32 v1, s5
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: .LBB21_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_ge_i64_e32 vcc, s[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s1
+; GCN2-NEXT: v_mov_b32_e32 v6, s0
+; GCN2-NEXT: v_mov_b32_e32 v4, s4
+; GCN2-NEXT: v_mov_b32_e32 v5, s5
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB21_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v2, s6
+; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile min i64* %out, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i64_addr64(i64* %out, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_min_i64_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s4
+; GCN1-NEXT: s_addc_u32 s1, s1, s5
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB22_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB22_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_min_i64_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s4
+; GCN2-NEXT: s_addc_u32 s1, s1, s5
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB22_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB22_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %tmp0 = atomicrmw volatile min i64* %ptr, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i64_ret_addr64(i64* %out, i64* %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_min_i64_ret_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s6
+; GCN1-NEXT: s_addc_u32 s1, s1, s7
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[6:7], 0
+; GCN1-NEXT: .LBB23_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s5
+; GCN1-NEXT: v_mov_b32_e32 v6, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_execnz .LBB23_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT: v_mov_b32_e32 v2, s2
+; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_min_i64_ret_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s6
+; GCN2-NEXT: s_addc_u32 s1, s1, s7
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[6:7], 0
+; GCN2-NEXT: .LBB23_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s5
+; GCN2-NEXT: v_mov_b32_e32 v6, s4
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_execnz .LBB23_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT: v_mov_b32_e32 v2, s2
+; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %tmp0 = atomicrmw volatile min i64* %ptr, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_offset(i64* %out, i64 %in) {
+; GCN1-LABEL: atomic_umin_i64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: .LBB24_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB24_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: .LBB24_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB24_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64* %out, i64 4
+ %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_ret_offset(i64* %out, i64* %out2, i64 %in) {
+; GCN1-LABEL: atomic_umin_i64_ret_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s2, s4, 32
+; GCN1-NEXT: s_addc_u32 s3, s5, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s2
+; GCN1-NEXT: v_mov_b32_e32 v1, s3
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB25_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_ge_u64_e32 vcc, s[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s1
+; GCN1-NEXT: v_mov_b32_e32 v6, s0
+; GCN1-NEXT: v_mov_b32_e32 v5, s3
+; GCN1-NEXT: v_mov_b32_e32 v4, s2
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB25_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, s6
+; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i64_ret_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s2, s4, 32
+; GCN2-NEXT: s_addc_u32 s3, s5, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s2
+; GCN2-NEXT: v_mov_b32_e32 v1, s3
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB25_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_ge_u64_e32 vcc, s[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s1
+; GCN2-NEXT: v_mov_b32_e32 v6, s0
+; GCN2-NEXT: v_mov_b32_e32 v5, s3
+; GCN2-NEXT: v_mov_b32_e32 v4, s2
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB25_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, s6
+; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64* %out, i64 4
+ %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_addr64_offset(i64* %out, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_umin_i64_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s4
+; GCN1-NEXT: s_addc_u32 s1, s1, s5
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB26_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB26_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i64_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s4
+; GCN2-NEXT: s_addc_u32 s1, s1, s5
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB26_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB26_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %gep = getelementptr i64, i64* %ptr, i64 4
+ %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(i64* %out, i64* %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_umin_i64_ret_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s6
+; GCN1-NEXT: s_addc_u32 s1, s1, s7
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[6:7], 0
+; GCN1-NEXT: .LBB27_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s5
+; GCN1-NEXT: v_mov_b32_e32 v6, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_execnz .LBB27_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT: v_mov_b32_e32 v2, s2
+; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i64_ret_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s6
+; GCN2-NEXT: s_addc_u32 s1, s1, s7
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[6:7], 0
+; GCN2-NEXT: .LBB27_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s5
+; GCN2-NEXT: v_mov_b32_e32 v6, s4
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_execnz .LBB27_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT: v_mov_b32_e32 v2, s2
+; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %gep = getelementptr i64, i64* %ptr, i64 4
+ %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64(i64* %out, i64 %in) {
+; GCN1-LABEL: atomic_umin_i64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: .LBB28_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB28_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: .LBB28_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB28_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile umin i64* %out, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_ret(i64* %out, i64* %out2, i64 %in) {
+; GCN1-LABEL: atomic_umin_i64_ret:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s4
+; GCN1-NEXT: v_mov_b32_e32 v1, s5
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: .LBB29_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_ge_u64_e32 vcc, s[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s1
+; GCN1-NEXT: v_mov_b32_e32 v6, s0
+; GCN1-NEXT: v_mov_b32_e32 v4, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s5
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB29_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v2, s6
+; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i64_ret:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s4
+; GCN2-NEXT: v_mov_b32_e32 v1, s5
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: .LBB29_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_ge_u64_e32 vcc, s[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s1
+; GCN2-NEXT: v_mov_b32_e32 v6, s0
+; GCN2-NEXT: v_mov_b32_e32 v4, s4
+; GCN2-NEXT: v_mov_b32_e32 v5, s5
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB29_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v2, s6
+; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile umin i64* %out, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_addr64(i64* %out, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_umin_i64_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s4
+; GCN1-NEXT: s_addc_u32 s1, s1, s5
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB30_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB30_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i64_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s4
+; GCN2-NEXT: s_addc_u32 s1, s1, s5
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB30_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB30_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %tmp0 = atomicrmw volatile umin i64* %ptr, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_ret_addr64(i64* %out, i64* %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_umin_i64_ret_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s6
+; GCN1-NEXT: s_addc_u32 s1, s1, s7
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[6:7], 0
+; GCN1-NEXT: .LBB31_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s5
+; GCN1-NEXT: v_mov_b32_e32 v6, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_execnz .LBB31_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT: v_mov_b32_e32 v2, s2
+; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i64_ret_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s6
+; GCN2-NEXT: s_addc_u32 s1, s1, s7
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[6:7], 0
+; GCN2-NEXT: .LBB31_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s5
+; GCN2-NEXT: v_mov_b32_e32 v6, s4
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_execnz .LBB31_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT: v_mov_b32_e32 v2, s2
+; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %tmp0 = atomicrmw volatile umin i64* %ptr, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN1 %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN2 %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN3 %s
+
+define amdgpu_kernel void @atomic_max_i32_offset(i32* %out, i32 %in) {
+; GCN1-LABEL: atomic_max_i32_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s2, 16
+; GCN1-NEXT: s_addc_u32 s1, s3, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB0_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB0_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s2, 16
+; GCN2-NEXT: s_addc_u32 s1, s3, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB0_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB0_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s2
+; GCN3-NEXT: v_mov_b32_e32 v1, s3
+; GCN3-NEXT: flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT: .LBB0_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v2, s2
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v3, s3
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB0_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32* %out, i32 4
+ %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_ret_offset(i32* %out, i32* %out2, i32 %in) {
+; GCN1-LABEL: atomic_max_i32_ret_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s4, 16
+; GCN1-NEXT: s_addc_u32 s1, s5, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: .LBB1_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: v_max_i32_e32 v0, s8, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB1_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_ret_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s4, 16
+; GCN2-NEXT: s_addc_u32 s1, s5, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: .LBB1_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: v_max_i32_e32 v0, s8, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB1_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_ret_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s2, s[0:1], 0x34
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s4
+; GCN3-NEXT: v_mov_b32_e32 v1, s5
+; GCN3-NEXT: flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT: .LBB1_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v2, s4
+; GCN3-NEXT: v_mov_b32_e32 v3, s5
+; GCN3-NEXT: v_max_i32_e32 v0, s2, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB1_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32* %out, i32 4
+ %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_addr64_offset(i32* %out, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i32_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s6, s0
+; GCN1-NEXT: s_addc_u32 s1, s7, s1
+; GCN1-NEXT: s_add_u32 s0, s0, 16
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB2_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB2_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s6, s0
+; GCN2-NEXT: s_addc_u32 s1, s7, s1
+; GCN2-NEXT: s_add_u32 s0, s0, 16
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB2_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB2_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_addr64_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s6, s0
+; GCN3-NEXT: s_addc_u32 s1, s7, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB2_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB2_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %gep = getelementptr i32, i32* %ptr, i32 4
+ %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i32_ret_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s4, s0
+; GCN1-NEXT: s_addc_u32 s1, s5, s1
+; GCN1-NEXT: s_add_u32 s0, s0, 16
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB3_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: v_max_i32_e32 v0, s8, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB3_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_ret_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s4, s0
+; GCN2-NEXT: s_addc_u32 s1, s5, s1
+; GCN2-NEXT: s_add_u32 s0, s0, 16
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB3_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: v_max_i32_e32 v0, s8, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB3_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_ret_addr64_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s4, s0
+; GCN3-NEXT: s_addc_u32 s1, s5, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB3_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: v_max_i32_e32 v0, s8, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB3_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %gep = getelementptr i32, i32* %ptr, i32 4
+ %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32(i32* %out, i32 %in) {
+; GCN1-LABEL: atomic_max_i32:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_mov_b64 s[0:1], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s2
+; GCN1-NEXT: v_mov_b32_e32 v1, s3
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: .LBB4_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v2, s2
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN1-NEXT: s_cbranch_execnz .LBB4_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_mov_b64 s[0:1], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s2
+; GCN2-NEXT: v_mov_b32_e32 v1, s3
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: .LBB4_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v2, s2
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN2-NEXT: s_cbranch_execnz .LBB4_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s2
+; GCN3-NEXT: v_mov_b32_e32 v1, s3
+; GCN3-NEXT: flat_load_dword v1, v[0:1]
+; GCN3-NEXT: .LBB4_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v2, s2
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v3, s3
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB4_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile max i32* %out, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_ret(i32* %out, i32* %out2, i32 %in) {
+; GCN1-LABEL: atomic_max_i32_ret:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s2, s[0:1], 0xd
+; GCN1-NEXT: s_mov_b64 s[0:1], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s4
+; GCN1-NEXT: v_mov_b32_e32 v1, s5
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: .LBB5_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v2, s4
+; GCN1-NEXT: v_mov_b32_e32 v3, s5
+; GCN1-NEXT: v_max_i32_e32 v0, s2, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN1-NEXT: s_cbranch_execnz .LBB5_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_ret:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s2, s[0:1], 0x34
+; GCN2-NEXT: s_mov_b64 s[0:1], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s4
+; GCN2-NEXT: v_mov_b32_e32 v1, s5
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: .LBB5_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v2, s4
+; GCN2-NEXT: v_mov_b32_e32 v3, s5
+; GCN2-NEXT: v_max_i32_e32 v0, s2, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN2-NEXT: s_cbranch_execnz .LBB5_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_ret:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s2, s[0:1], 0x34
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s4
+; GCN3-NEXT: v_mov_b32_e32 v1, s5
+; GCN3-NEXT: flat_load_dword v0, v[0:1]
+; GCN3-NEXT: .LBB5_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v2, s4
+; GCN3-NEXT: v_mov_b32_e32 v3, s5
+; GCN3-NEXT: v_max_i32_e32 v0, s2, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB5_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile max i32* %out, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_addr64(i32* %out, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i32_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s6, s0
+; GCN1-NEXT: s_addc_u32 s1, s7, s1
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB6_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB6_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s6, s0
+; GCN2-NEXT: s_addc_u32 s1, s7, s1
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB6_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB6_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_addr64:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s6, s0
+; GCN3-NEXT: s_addc_u32 s1, s7, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v1, v[0:1]
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB6_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB6_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %val = atomicrmw volatile max i32* %ptr, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i32_ret_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s4, s0
+; GCN1-NEXT: s_addc_u32 s1, s5, s1
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB7_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: v_max_i32_e32 v0, s8, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB7_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_ret_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s4, s0
+; GCN2-NEXT: s_addc_u32 s1, s5, s1
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB7_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: v_max_i32_e32 v0, s8, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB7_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_ret_addr64:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s4, s0
+; GCN3-NEXT: s_addc_u32 s1, s5, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v0, v[0:1]
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB7_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: v_max_i32_e32 v0, s8, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB7_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %val = atomicrmw volatile max i32* %ptr, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
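+; The umax tests below use seq_cst at the default (system) scope, so the
+; flat atomicrmw is expanded into a load + v_max_u32 + flat_atomic_cmpswap
+; retry loop rather than selecting a native flat_atomic_umax instruction.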
+define amdgpu_kernel void @atomic_umax_i32_offset(i32* %out, i32 %in) {
+; GCN1-LABEL: atomic_umax_i32_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s2, 16
+; GCN1-NEXT: s_addc_u32 s1, s3, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB8_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB8_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i32_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s2, 16
+; GCN2-NEXT: s_addc_u32 s1, s3, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB8_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB8_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umax_i32_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s2
+; GCN3-NEXT: v_mov_b32_e32 v1, s3
+; GCN3-NEXT: flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT: .LBB8_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v2, s2
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v3, s3
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB8_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32* %out, i32 4
+ %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
+ ret void
+}
+
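+; The _ret variants additionally check that the value observed by the final,
+; successful cmpswap iteration (the original memory contents) is stored to
+; %out2 once the loop exits.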
+define amdgpu_kernel void @atomic_umax_i32_ret_offset(i32* %out, i32* %out2, i32 %in) {
+; GCN1-LABEL: atomic_umax_i32_ret_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s4, 16
+; GCN1-NEXT: s_addc_u32 s1, s5, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: .LBB9_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: v_max_u32_e32 v0, s8, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB9_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i32_ret_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s4, 16
+; GCN2-NEXT: s_addc_u32 s1, s5, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: .LBB9_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: v_max_u32_e32 v0, s8, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB9_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umax_i32_ret_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s2, s[0:1], 0x34
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s4
+; GCN3-NEXT: v_mov_b32_e32 v1, s5
+; GCN3-NEXT: flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT: .LBB9_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v2, s4
+; GCN3-NEXT: v_mov_b32_e32 v3, s5
+; GCN3-NEXT: v_max_u32_e32 v0, s2, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB9_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32* %out, i32 4
+ %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i32_addr64_offset(i32* %out, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_umax_i32_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s6, s0
+; GCN1-NEXT: s_addc_u32 s1, s7, s1
+; GCN1-NEXT: s_add_u32 s0, s0, 16
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB10_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB10_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i32_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s6, s0
+; GCN2-NEXT: s_addc_u32 s1, s7, s1
+; GCN2-NEXT: s_add_u32 s0, s0, 16
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB10_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB10_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umax_i32_addr64_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s6, s0
+; GCN3-NEXT: s_addc_u32 s1, s7, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB10_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB10_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %gep = getelementptr i32, i32* %ptr, i32 4
+ %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_umax_i32_ret_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s4, s0
+; GCN1-NEXT: s_addc_u32 s1, s5, s1
+; GCN1-NEXT: s_add_u32 s0, s0, 16
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB11_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: v_max_u32_e32 v0, s8, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB11_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i32_ret_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s4, s0
+; GCN2-NEXT: s_addc_u32 s1, s5, s1
+; GCN2-NEXT: s_add_u32 s0, s0, 16
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB11_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: v_max_u32_e32 v0, s8, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB11_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umax_i32_ret_addr64_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s4, s0
+; GCN3-NEXT: s_addc_u32 s1, s5, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB11_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: v_max_u32_e32 v0, s8, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB11_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %gep = getelementptr i32, i32* %ptr, i32 4
+ %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i32(i32* %out, i32 %in) {
+; GCN1-LABEL: atomic_umax_i32:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_mov_b64 s[0:1], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s2
+; GCN1-NEXT: v_mov_b32_e32 v1, s3
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: .LBB12_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v2, s2
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN1-NEXT: s_cbranch_execnz .LBB12_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i32:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_mov_b64 s[0:1], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s2
+; GCN2-NEXT: v_mov_b32_e32 v1, s3
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: .LBB12_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v2, s2
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN2-NEXT: s_cbranch_execnz .LBB12_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umax_i32:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s2
+; GCN3-NEXT: v_mov_b32_e32 v1, s3
+; GCN3-NEXT: flat_load_dword v1, v[0:1]
+; GCN3-NEXT: .LBB12_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v2, s2
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v3, s3
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB12_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile umax i32* %out, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i32_ret(i32* %out, i32* %out2, i32 %in) {
+; GCN1-LABEL: atomic_umax_i32_ret:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s2, s[0:1], 0xd
+; GCN1-NEXT: s_mov_b64 s[0:1], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s4
+; GCN1-NEXT: v_mov_b32_e32 v1, s5
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: .LBB13_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v2, s4
+; GCN1-NEXT: v_mov_b32_e32 v3, s5
+; GCN1-NEXT: v_max_u32_e32 v0, s2, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN1-NEXT: s_cbranch_execnz .LBB13_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i32_ret:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s2, s[0:1], 0x34
+; GCN2-NEXT: s_mov_b64 s[0:1], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s4
+; GCN2-NEXT: v_mov_b32_e32 v1, s5
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: .LBB13_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v2, s4
+; GCN2-NEXT: v_mov_b32_e32 v3, s5
+; GCN2-NEXT: v_max_u32_e32 v0, s2, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN2-NEXT: s_cbranch_execnz .LBB13_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umax_i32_ret:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s2, s[0:1], 0x34
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s4
+; GCN3-NEXT: v_mov_b32_e32 v1, s5
+; GCN3-NEXT: flat_load_dword v0, v[0:1]
+; GCN3-NEXT: .LBB13_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v2, s4
+; GCN3-NEXT: v_mov_b32_e32 v3, s5
+; GCN3-NEXT: v_max_u32_e32 v0, s2, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB13_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile umax i32* %out, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i32_addr64(i32* %out, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_umax_i32_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s6, s0
+; GCN1-NEXT: s_addc_u32 s1, s7, s1
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB14_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB14_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i32_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s6, s0
+; GCN2-NEXT: s_addc_u32 s1, s7, s1
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB14_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB14_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umax_i32_addr64:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s6, s0
+; GCN3-NEXT: s_addc_u32 s1, s7, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v1, v[0:1]
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB14_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB14_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %val = atomicrmw volatile umax i32* %ptr, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_umax_i32_ret_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s4, s0
+; GCN1-NEXT: s_addc_u32 s1, s5, s1
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB15_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: v_max_u32_e32 v0, s8, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB15_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i32_ret_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s4, s0
+; GCN2-NEXT: s_addc_u32 s1, s5, s1
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB15_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: v_max_u32_e32 v0, s8, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB15_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umax_i32_ret_addr64:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s4, s0
+; GCN3-NEXT: s_addc_u32 s1, s5, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v0, v[0:1]
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB15_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: v_max_u32_e32 v0, s8, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB15_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %val = atomicrmw volatile umax i32* %ptr, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
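+; Signed min follows the same pattern as umax above: a v_min_i32 +
+; flat_atomic_cmpswap retry loop is generated for the system-scope seq_cst
+; atomicrmw instead of a native flat_atomic_smin instruction.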
+define amdgpu_kernel void @atomic_min_i32_offset(i32* %out, i32 %in) {
+; GCN1-LABEL: atomic_min_i32_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s2, 16
+; GCN1-NEXT: s_addc_u32 s1, s3, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB16_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_min_i32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB16_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_min_i32_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s2, 16
+; GCN2-NEXT: s_addc_u32 s1, s3, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB16_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_min_i32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB16_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_min_i32_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s2
+; GCN3-NEXT: v_mov_b32_e32 v1, s3
+; GCN3-NEXT: flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT: .LBB16_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v2, s2
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_min_i32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v3, s3
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB16_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32* %out, i32 4
+ %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32_ret_offset(i32* %out, i32* %out2, i32 %in) {
+; GCN1-LABEL: atomic_min_i32_ret_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s4, 16
+; GCN1-NEXT: s_addc_u32 s1, s5, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: .LBB17_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: v_min_i32_e32 v0, s8, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB17_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_min_i32_ret_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s4, 16
+; GCN2-NEXT: s_addc_u32 s1, s5, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: .LBB17_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: v_min_i32_e32 v0, s8, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB17_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_min_i32_ret_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s2, s[0:1], 0x34
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s4
+; GCN3-NEXT: v_mov_b32_e32 v1, s5
+; GCN3-NEXT: flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT: .LBB17_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v2, s4
+; GCN3-NEXT: v_mov_b32_e32 v3, s5
+; GCN3-NEXT: v_min_i32_e32 v0, s2, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB17_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32* %out, i32 4
+ %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32_addr64_offset(i32* %out, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_min_i32_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s6, s0
+; GCN1-NEXT: s_addc_u32 s1, s7, s1
+; GCN1-NEXT: s_add_u32 s0, s0, 16
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB18_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_min_i32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB18_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_min_i32_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s6, s0
+; GCN2-NEXT: s_addc_u32 s1, s7, s1
+; GCN2-NEXT: s_add_u32 s0, s0, 16
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB18_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_min_i32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB18_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_min_i32_addr64_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s6, s0
+; GCN3-NEXT: s_addc_u32 s1, s7, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB18_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_min_i32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB18_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %gep = getelementptr i32, i32* %ptr, i32 4
+ %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_min_i32_ret_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s4, s0
+; GCN1-NEXT: s_addc_u32 s1, s5, s1
+; GCN1-NEXT: s_add_u32 s0, s0, 16
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB19_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: v_min_i32_e32 v0, s8, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB19_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_min_i32_ret_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s4, s0
+; GCN2-NEXT: s_addc_u32 s1, s5, s1
+; GCN2-NEXT: s_add_u32 s0, s0, 16
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB19_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: v_min_i32_e32 v0, s8, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB19_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_min_i32_ret_addr64_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s4, s0
+; GCN3-NEXT: s_addc_u32 s1, s5, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB19_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: v_min_i32_e32 v0, s8, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB19_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %gep = getelementptr i32, i32* %ptr, i32 4
+ %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32(i32* %out, i32 %in) {
+; GCN1-LABEL: atomic_min_i32:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_mov_b64 s[0:1], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s2
+; GCN1-NEXT: v_mov_b32_e32 v1, s3
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: .LBB20_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v2, s2
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_min_i32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN1-NEXT: s_cbranch_execnz .LBB20_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_min_i32:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_mov_b64 s[0:1], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s2
+; GCN2-NEXT: v_mov_b32_e32 v1, s3
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: .LBB20_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v2, s2
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_min_i32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN2-NEXT: s_cbranch_execnz .LBB20_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_min_i32:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s2
+; GCN3-NEXT: v_mov_b32_e32 v1, s3
+; GCN3-NEXT: flat_load_dword v1, v[0:1]
+; GCN3-NEXT: .LBB20_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v2, s2
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_min_i32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v3, s3
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB20_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile min i32* %out, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32_ret(i32* %out, i32* %out2, i32 %in) {
+; GCN1-LABEL: atomic_min_i32_ret:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s2, s[0:1], 0xd
+; GCN1-NEXT: s_mov_b64 s[0:1], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s4
+; GCN1-NEXT: v_mov_b32_e32 v1, s5
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: .LBB21_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v2, s4
+; GCN1-NEXT: v_mov_b32_e32 v3, s5
+; GCN1-NEXT: v_min_i32_e32 v0, s2, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN1-NEXT: s_cbranch_execnz .LBB21_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_min_i32_ret:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s2, s[0:1], 0x34
+; GCN2-NEXT: s_mov_b64 s[0:1], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s4
+; GCN2-NEXT: v_mov_b32_e32 v1, s5
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: .LBB21_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v2, s4
+; GCN2-NEXT: v_mov_b32_e32 v3, s5
+; GCN2-NEXT: v_min_i32_e32 v0, s2, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN2-NEXT: s_cbranch_execnz .LBB21_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_min_i32_ret:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s2, s[0:1], 0x34
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s4
+; GCN3-NEXT: v_mov_b32_e32 v1, s5
+; GCN3-NEXT: flat_load_dword v0, v[0:1]
+; GCN3-NEXT: .LBB21_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v2, s4
+; GCN3-NEXT: v_mov_b32_e32 v3, s5
+; GCN3-NEXT: v_min_i32_e32 v0, s2, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB21_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile min i32* %out, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32_addr64(i32* %out, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_min_i32_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s6, s0
+; GCN1-NEXT: s_addc_u32 s1, s7, s1
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB22_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_min_i32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB22_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_min_i32_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s6, s0
+; GCN2-NEXT: s_addc_u32 s1, s7, s1
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB22_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_min_i32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB22_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_min_i32_addr64:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s6, s0
+; GCN3-NEXT: s_addc_u32 s1, s7, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v1, v[0:1]
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB22_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_min_i32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB22_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %val = atomicrmw volatile min i32* %ptr, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_min_i32_ret_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s4, s0
+; GCN1-NEXT: s_addc_u32 s1, s5, s1
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB23_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: v_min_i32_e32 v0, s8, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB23_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_min_i32_ret_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s4, s0
+; GCN2-NEXT: s_addc_u32 s1, s5, s1
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB23_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: v_min_i32_e32 v0, s8, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB23_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_min_i32_ret_addr64:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s4, s0
+; GCN3-NEXT: s_addc_u32 s1, s5, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v0, v[0:1]
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB23_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: v_min_i32_e32 v0, s8, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB23_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %val = atomicrmw volatile min i32* %ptr, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i32_offset(i32* %out, i32 %in) {
+; GCN1-LABEL: atomic_umin_i32_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s2, 16
+; GCN1-NEXT: s_addc_u32 s1, s3, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB24_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_min_u32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB24_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i32_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s2, 16
+; GCN2-NEXT: s_addc_u32 s1, s3, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB24_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_min_u32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB24_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umin_i32_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s2
+; GCN3-NEXT: v_mov_b32_e32 v1, s3
+; GCN3-NEXT: flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT: .LBB24_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v2, s2
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_min_u32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v3, s3
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB24_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32* %out, i32 4
+ %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i32_ret_offset(i32* %out, i32* %out2, i32 %in) {
+; GCN1-LABEL: atomic_umin_i32_ret_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s4, 16
+; GCN1-NEXT: s_addc_u32 s1, s5, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: .LBB25_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: v_min_u32_e32 v0, s8, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB25_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i32_ret_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s4, 16
+; GCN2-NEXT: s_addc_u32 s1, s5, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: .LBB25_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: v_min_u32_e32 v0, s8, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB25_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umin_i32_ret_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s2, s[0:1], 0x34
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s4
+; GCN3-NEXT: v_mov_b32_e32 v1, s5
+; GCN3-NEXT: flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT: .LBB25_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v2, s4
+; GCN3-NEXT: v_mov_b32_e32 v3, s5
+; GCN3-NEXT: v_min_u32_e32 v0, s2, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB25_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32* %out, i32 4
+ %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i32_addr64_offset(i32* %out, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_umin_i32_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s6, s0
+; GCN1-NEXT: s_addc_u32 s1, s7, s1
+; GCN1-NEXT: s_add_u32 s0, s0, 16
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB26_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_min_u32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB26_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i32_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s6, s0
+; GCN2-NEXT: s_addc_u32 s1, s7, s1
+; GCN2-NEXT: s_add_u32 s0, s0, 16
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB26_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_min_u32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB26_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umin_i32_addr64_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s6, s0
+; GCN3-NEXT: s_addc_u32 s1, s7, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB26_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_min_u32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB26_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %gep = getelementptr i32, i32* %ptr, i32 4
+ %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_umin_i32_ret_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s4, s0
+; GCN1-NEXT: s_addc_u32 s1, s5, s1
+; GCN1-NEXT: s_add_u32 s0, s0, 16
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB27_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: v_min_u32_e32 v0, s8, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB27_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i32_ret_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s4, s0
+; GCN2-NEXT: s_addc_u32 s1, s5, s1
+; GCN2-NEXT: s_add_u32 s0, s0, 16
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB27_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: v_min_u32_e32 v0, s8, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB27_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umin_i32_ret_addr64_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s4, s0
+; GCN3-NEXT: s_addc_u32 s1, s5, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB27_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: v_min_u32_e32 v0, s8, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB27_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %gep = getelementptr i32, i32* %ptr, i32 4
+ %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i32(i32* %out, i32 %in) {
+; GCN1-LABEL: atomic_umin_i32:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_mov_b64 s[0:1], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s2
+; GCN1-NEXT: v_mov_b32_e32 v1, s3
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: .LBB28_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v2, s2
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_min_u32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN1-NEXT: s_cbranch_execnz .LBB28_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i32:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_mov_b64 s[0:1], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s2
+; GCN2-NEXT: v_mov_b32_e32 v1, s3
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: .LBB28_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v2, s2
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_min_u32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN2-NEXT: s_cbranch_execnz .LBB28_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umin_i32:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s2
+; GCN3-NEXT: v_mov_b32_e32 v1, s3
+; GCN3-NEXT: flat_load_dword v1, v[0:1]
+; GCN3-NEXT: .LBB28_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v2, s2
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_min_u32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v3, s3
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB28_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile umin i32* %out, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i32_ret(i32* %out, i32* %out2, i32 %in) {
+; GCN1-LABEL: atomic_umin_i32_ret:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s2, s[0:1], 0xd
+; GCN1-NEXT: s_mov_b64 s[0:1], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s4
+; GCN1-NEXT: v_mov_b32_e32 v1, s5
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: .LBB29_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v2, s4
+; GCN1-NEXT: v_mov_b32_e32 v3, s5
+; GCN1-NEXT: v_min_u32_e32 v0, s2, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN1-NEXT: s_cbranch_execnz .LBB29_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i32_ret:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s2, s[0:1], 0x34
+; GCN2-NEXT: s_mov_b64 s[0:1], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s4
+; GCN2-NEXT: v_mov_b32_e32 v1, s5
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: .LBB29_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v2, s4
+; GCN2-NEXT: v_mov_b32_e32 v3, s5
+; GCN2-NEXT: v_min_u32_e32 v0, s2, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN2-NEXT: s_cbranch_execnz .LBB29_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umin_i32_ret:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s2, s[0:1], 0x34
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s4
+; GCN3-NEXT: v_mov_b32_e32 v1, s5
+; GCN3-NEXT: flat_load_dword v0, v[0:1]
+; GCN3-NEXT: .LBB29_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v2, s4
+; GCN3-NEXT: v_mov_b32_e32 v3, s5
+; GCN3-NEXT: v_min_u32_e32 v0, s2, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB29_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile umin i32* %out, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i32_addr64(i32* %out, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_umin_i32_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s6, s0
+; GCN1-NEXT: s_addc_u32 s1, s7, s1
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB30_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_min_u32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB30_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i32_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s6, s0
+; GCN2-NEXT: s_addc_u32 s1, s7, s1
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB30_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_min_u32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB30_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umin_i32_addr64:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s6, s0
+; GCN3-NEXT: s_addc_u32 s1, s7, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v1, v[0:1]
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB30_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_min_u32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB30_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %val = atomicrmw volatile umin i32* %ptr, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_umin_i32_ret_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s4, s0
+; GCN1-NEXT: s_addc_u32 s1, s5, s1
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB31_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: v_min_u32_e32 v0, s8, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB31_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i32_ret_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s4, s0
+; GCN2-NEXT: s_addc_u32 s1, s5, s1
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB31_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: v_min_u32_e32 v0, s8, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB31_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umin_i32_ret_addr64:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s4, s0
+; GCN3-NEXT: s_addc_u32 s1, s5, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v0, v[0:1]
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB31_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: v_min_u32_e32 v0, s8, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB31_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %val = atomicrmw volatile umin i32* %ptr, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX10 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX11 %s
+
+; Test using the saddr addressing mode of global_* flat atomic instructions.
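+;
+; In the saddr form, the 64-bit base address comes from an SGPR pair and a
+; 32-bit offset from a single VGPR (zero-extended, matching the zext in the
+; IR below), as in the "global_load_dword v0, v0, s[2:3]" checks. The
+; cmpswap loops these atomics expand to instead materialize the full
+; address into a VGPR pair and use the vaddr ("off") form.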
+
+; --------------------------------------------------------------------------------
+; atomicrmw max
+; --------------------------------------------------------------------------------
+
+define amdgpu_ps float @global_max_saddr_i32_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
+; GFX9-LABEL: global_max_saddr_i32_rtn:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: global_load_dword v0, v0, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB0_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_max_i32_e32 v4, v5, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB0_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: global_max_saddr_i32_rtn:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_mov_b32_e32 v2, v0
+; GFX10-NEXT: global_load_dword v0, v0, s[2:3]
+; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB0_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: v_max_i32_e32 v4, v5, v1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB0_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: global_max_saddr_i32_rtn:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v2, v0
+; GFX11-NEXT: global_load_b32 v0, v0, s[2:3]
+; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB0_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_i32_e32 v4, v5, v1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB0_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX11-NEXT: ; return to shader part epilog
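+  ; SGPR base plus a zero-extended 32-bit VGPR offset: the address pattern
+  ; the global saddr instruction forms are selected from.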
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
+ %rtn = atomicrmw max i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps float @global_max_saddr_i32_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
+; GFX9-LABEL: global_max_saddr_i32_rtn_neg128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: global_load_dword v0, v0, s[2:3] offset:-128
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB1_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_max_i32_e32 v4, v5, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB1_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: global_max_saddr_i32_rtn_neg128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_mov_b32_e32 v2, v0
+; GFX10-NEXT: global_load_dword v0, v0, s[2:3] offset:-128
+; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB1_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: v_max_i32_e32 v4, v5, v1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB1_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: global_max_saddr_i32_rtn_neg128:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v2, v0
+; GFX11-NEXT: global_load_b32 v0, v0, s[2:3] offset:-128
+; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB1_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_i32_e32 v4, v5, v1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB1_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX11-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
+ %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
+ %rtn = atomicrmw max i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps void @global_max_saddr_i32_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
+; GFX9-LABEL: global_max_saddr_i32_nortn:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dword v5, v0, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB2_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_i32_e32 v4, v5, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB2_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: global_max_saddr_i32_nortn:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dword v5, v0, s[2:3]
+; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB2_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_max_i32_e32 v4, v5, v1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB2_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_max_saddr_i32_nortn:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b32 v5, v0, s[2:3]
+; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB2_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_max_i32_e32 v4, v5, v1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB2_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
+ %unused = atomicrmw max i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ ret void
+}
+
+define amdgpu_ps void @global_max_saddr_i32_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
+; GFX9-LABEL: global_max_saddr_i32_nortn_neg128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dword v5, v0, s[2:3] offset:-128
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB3_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_i32_e32 v4, v5, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB3_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: global_max_saddr_i32_nortn_neg128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dword v5, v0, s[2:3] offset:-128
+; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB3_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_max_i32_e32 v4, v5, v1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB3_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_max_saddr_i32_nortn_neg128:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b32 v5, v0, s[2:3] offset:-128
+; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB3_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_max_i32_e32 v4, v5, v1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB3_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
+ %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
+ %unused = atomicrmw max i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ ret void
+}
+
+define amdgpu_ps <2 x float> @global_max_saddr_i64_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
+; GFX9-LABEL: global_max_saddr_i64_rtn:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, s3
+; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v10, v4
+; GFX9-NEXT: v_mov_b32_e32 v9, v3
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[9:10], v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB4_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-NEXT: v_mov_b32_e32 v1, v4
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: global_max_saddr_i64_rtn:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3]
+; GFX10-NEXT: v_add_co_u32 v5, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v6, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v10, v4
+; GFX10-NEXT: v_mov_b32_e32 v9, v3
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc, v[9:10], v[1:2]
+; GFX10-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB4_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-NEXT: v_mov_b32_e32 v1, v4
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: global_max_saddr_i64_rtn:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b64 v[3:4], v0, s[2:3]
+; GFX11-NEXT: v_add_co_u32 v5, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v6, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v10, v4
+; GFX11-NEXT: v_mov_b32_e32 v9, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc, v[9:10], v[1:2]
+; GFX11-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB4_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
+ %rtn = atomicrmw max i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps <2 x float> @global_max_saddr_i64_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
+; GFX9-LABEL: global_max_saddr_i64_rtn_neg128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] offset:-128
+; GFX9-NEXT: v_mov_b32_e32 v6, s3
+; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v10, v4
+; GFX9-NEXT: v_mov_b32_e32 v9, v3
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[9:10], v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB5_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-NEXT: v_mov_b32_e32 v1, v4
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: global_max_saddr_i64_rtn_neg128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] offset:-128
+; GFX10-NEXT: v_add_co_u32 v5, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v6, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v10, v4
+; GFX10-NEXT: v_mov_b32_e32 v9, v3
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc, v[9:10], v[1:2]
+; GFX10-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB5_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-NEXT: v_mov_b32_e32 v1, v4
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: global_max_saddr_i64_rtn_neg128:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b64 v[3:4], v0, s[2:3] offset:-128
+; GFX11-NEXT: v_add_co_u32 v5, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v6, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v10, v4
+; GFX11-NEXT: v_mov_b32_e32 v9, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc, v[9:10], v[1:2]
+; GFX11-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off offset:-128 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB5_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
+ %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
+ %rtn = atomicrmw max i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps void @global_max_saddr_i64_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
+; GFX9-LABEL: global_max_saddr_i64_nortn:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[5:6], v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB6_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: global_max_saddr_i64_nortn:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3]
+; GFX10-NEXT: v_add_co_u32 v7, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v8, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc, v[5:6], v[1:2]
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_mov_b32_e32 v5, v3
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB6_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_max_saddr_i64_nortn:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b64 v[5:6], v0, s[2:3]
+; GFX11-NEXT: v_add_co_u32 v7, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc, v[5:6], v[1:2]
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_mov_b32_e32 v5, v3
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB6_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
+ %unused = atomicrmw max i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ ret void
+}
+
+define amdgpu_ps void @global_max_saddr_i64_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
+; GFX9-LABEL: global_max_saddr_i64_nortn_neg128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] offset:-128
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[5:6], v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: global_max_saddr_i64_nortn_neg128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] offset:-128
+; GFX10-NEXT: v_add_co_u32 v7, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v8, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc, v[5:6], v[1:2]
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_mov_b32_e32 v5, v3
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB7_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_max_saddr_i64_nortn_neg128:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b64 v[5:6], v0, s[2:3] offset:-128
+; GFX11-NEXT: v_add_co_u32 v7, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_gt_i64_e32 vcc, v[5:6], v[1:2]
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off offset:-128 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_mov_b32_e32 v5, v3
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
+ %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
+ %unused = atomicrmw max i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; atomicrmw min
+; --------------------------------------------------------------------------------
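+; As with the max tests above, a seq_cst (system scope) atomicrmw min is
+; expanded into a global_atomic_cmpswap loop; the new value is computed
+; with v_min_i32 for i32 and v_cmp_le_i64/v_cndmask for i64.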
+
+define amdgpu_ps float @global_min_saddr_i32_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
+; GFX9-LABEL: global_min_saddr_i32_rtn:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: global_load_dword v0, v0, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_min_i32_e32 v4, v5, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB8_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: global_min_saddr_i32_rtn:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_mov_b32_e32 v2, v0
+; GFX10-NEXT: global_load_dword v0, v0, s[2:3]
+; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: v_min_i32_e32 v4, v5, v1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB8_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: global_min_saddr_i32_rtn:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v2, v0
+; GFX11-NEXT: global_load_b32 v0, v0, s[2:3]
+; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_i32_e32 v4, v5, v1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX11-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
+ %rtn = atomicrmw min i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps float @global_min_saddr_i32_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
+; GFX9-LABEL: global_min_saddr_i32_rtn_neg128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: global_load_dword v0, v0, s[2:3] offset:-128
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_min_i32_e32 v4, v5, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: global_min_saddr_i32_rtn_neg128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_mov_b32_e32 v2, v0
+; GFX10-NEXT: global_load_dword v0, v0, s[2:3] offset:-128
+; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: v_min_i32_e32 v4, v5, v1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB9_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: global_min_saddr_i32_rtn_neg128:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v2, v0
+; GFX11-NEXT: global_load_b32 v0, v0, s[2:3] offset:-128
+; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_i32_e32 v4, v5, v1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX11-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
+ %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
+ %rtn = atomicrmw min i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps void @global_min_saddr_i32_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
+; GFX9-LABEL: global_min_saddr_i32_nortn:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dword v5, v0, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_min_i32_e32 v4, v5, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: global_min_saddr_i32_nortn:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dword v5, v0, s[2:3]
+; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_min_i32_e32 v4, v5, v1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB10_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_min_saddr_i32_nortn:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b32 v5, v0, s[2:3]
+; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_min_i32_e32 v4, v5, v1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
+ %unused = atomicrmw min i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ ret void
+}
+
+define amdgpu_ps void @global_min_saddr_i32_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
+; GFX9-LABEL: global_min_saddr_i32_nortn_neg128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dword v5, v0, s[2:3] offset:-128
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_min_i32_e32 v4, v5, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: global_min_saddr_i32_nortn_neg128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dword v5, v0, s[2:3] offset:-128
+; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_min_i32_e32 v4, v5, v1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB11_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_min_saddr_i32_nortn_neg128:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b32 v5, v0, s[2:3] offset:-128
+; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_min_i32_e32 v4, v5, v1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB11_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
+ %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
+ %unused = atomicrmw min i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ ret void
+}
+
+define amdgpu_ps <2 x float> @global_min_saddr_i64_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
+; GFX9-LABEL: global_min_saddr_i64_rtn:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, s3
+; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v10, v4
+; GFX9-NEXT: v_mov_b32_e32 v9, v3
+; GFX9-NEXT: v_cmp_le_i64_e32 vcc, v[9:10], v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-NEXT: v_mov_b32_e32 v1, v4
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: global_min_saddr_i64_rtn:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3]
+; GFX10-NEXT: v_add_co_u32 v5, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v6, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v10, v4
+; GFX10-NEXT: v_mov_b32_e32 v9, v3
+; GFX10-NEXT: v_cmp_le_i64_e32 vcc, v[9:10], v[1:2]
+; GFX10-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB12_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-NEXT: v_mov_b32_e32 v1, v4
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: global_min_saddr_i64_rtn:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b64 v[3:4], v0, s[2:3]
+; GFX11-NEXT: v_add_co_u32 v5, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v6, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v10, v4
+; GFX11-NEXT: v_mov_b32_e32 v9, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_le_i64_e32 vcc, v[9:10], v[1:2]
+; GFX11-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB12_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
+ %rtn = atomicrmw min i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps <2 x float> @global_min_saddr_i64_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
+; GFX9-LABEL: global_min_saddr_i64_rtn_neg128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] offset:-128
+; GFX9-NEXT: v_mov_b32_e32 v6, s3
+; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v10, v4
+; GFX9-NEXT: v_mov_b32_e32 v9, v3
+; GFX9-NEXT: v_cmp_le_i64_e32 vcc, v[9:10], v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB13_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-NEXT: v_mov_b32_e32 v1, v4
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: global_min_saddr_i64_rtn_neg128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] offset:-128
+; GFX10-NEXT: v_add_co_u32 v5, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v6, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v10, v4
+; GFX10-NEXT: v_mov_b32_e32 v9, v3
+; GFX10-NEXT: v_cmp_le_i64_e32 vcc, v[9:10], v[1:2]
+; GFX10-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB13_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-NEXT: v_mov_b32_e32 v1, v4
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: global_min_saddr_i64_rtn_neg128:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b64 v[3:4], v0, s[2:3] offset:-128
+; GFX11-NEXT: v_add_co_u32 v5, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v6, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v10, v4
+; GFX11-NEXT: v_mov_b32_e32 v9, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_le_i64_e32 vcc, v[9:10], v[1:2]
+; GFX11-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off offset:-128 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB13_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
+ %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
+ %rtn = atomicrmw min i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps void @global_min_saddr_i64_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
+; GFX9-LABEL: global_min_saddr_i64_nortn:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_le_i64_e32 vcc, v[5:6], v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: global_min_saddr_i64_nortn:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3]
+; GFX10-NEXT: v_add_co_u32 v7, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v8, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_cmp_le_i64_e32 vcc, v[5:6], v[1:2]
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_mov_b32_e32 v5, v3
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB14_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_min_saddr_i64_nortn:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b64 v[5:6], v0, s[2:3]
+; GFX11-NEXT: v_add_co_u32 v7, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_le_i64_e32 vcc, v[5:6], v[1:2]
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_mov_b32_e32 v5, v3
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB14_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
+ %unused = atomicrmw min i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ ret void
+}
+
+define amdgpu_ps void @global_min_saddr_i64_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
+; GFX9-LABEL: global_min_saddr_i64_nortn_neg128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] offset:-128
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_le_i64_e32 vcc, v[5:6], v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: global_min_saddr_i64_nortn_neg128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] offset:-128
+; GFX10-NEXT: v_add_co_u32 v7, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v8, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_cmp_le_i64_e32 vcc, v[5:6], v[1:2]
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_mov_b32_e32 v5, v3
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB15_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_min_saddr_i64_nortn_neg128:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b64 v[5:6], v0, s[2:3] offset:-128
+; GFX11-NEXT: v_add_co_u32 v7, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_le_i64_e32 vcc, v[5:6], v[1:2]
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off offset:-128 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_mov_b32_e32 v5, v3
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB15_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
+ %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
+ %unused = atomicrmw min i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; atomicrmw umax
+; --------------------------------------------------------------------------------
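+; Same expansion shape as the signed min/max tests above, but with
+; unsigned compares: v_max_u32 for i32 and v_cmp_gt_u64/v_cndmask for i64.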
+
+define amdgpu_ps float @global_umax_saddr_i32_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
+; GFX9-LABEL: global_umax_saddr_i32_rtn:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: global_load_dword v0, v0, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_max_u32_e32 v4, v5, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB16_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: global_umax_saddr_i32_rtn:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_mov_b32_e32 v2, v0
+; GFX10-NEXT: global_load_dword v0, v0, s[2:3]
+; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: v_max_u32_e32 v4, v5, v1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB16_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: global_umax_saddr_i32_rtn:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v2, v0
+; GFX11-NEXT: global_load_b32 v0, v0, s[2:3]
+; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_u32_e32 v4, v5, v1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB16_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX11-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
+ %rtn = atomicrmw umax i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps float @global_umax_saddr_i32_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
+; GFX9-LABEL: global_umax_saddr_i32_rtn_neg128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: global_load_dword v0, v0, s[2:3] offset:-128
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_max_u32_e32 v4, v5, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB17_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: global_umax_saddr_i32_rtn_neg128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_mov_b32_e32 v2, v0
+; GFX10-NEXT: global_load_dword v0, v0, s[2:3] offset:-128
+; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: v_max_u32_e32 v4, v5, v1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB17_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: global_umax_saddr_i32_rtn_neg128:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v2, v0
+; GFX11-NEXT: global_load_b32 v0, v0, s[2:3] offset:-128
+; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_max_u32_e32 v4, v5, v1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB17_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX11-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
+ %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
+ %rtn = atomicrmw umax i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps void @global_umax_saddr_i32_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
+; GFX9-LABEL: global_umax_saddr_i32_nortn:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dword v5, v0, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_u32_e32 v4, v5, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB18_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: global_umax_saddr_i32_nortn:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dword v5, v0, s[2:3]
+; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_max_u32_e32 v4, v5, v1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB18_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_umax_saddr_i32_nortn:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b32 v5, v0, s[2:3]
+; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_max_u32_e32 v4, v5, v1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB18_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
+ %unused = atomicrmw umax i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ ret void
+}
+
+define amdgpu_ps void @global_umax_saddr_i32_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
+; GFX9-LABEL: global_umax_saddr_i32_nortn_neg128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dword v5, v0, s[2:3] offset:-128
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_u32_e32 v4, v5, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB19_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: global_umax_saddr_i32_nortn_neg128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dword v5, v0, s[2:3] offset:-128
+; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_max_u32_e32 v4, v5, v1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB19_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_umax_saddr_i32_nortn_neg128:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b32 v5, v0, s[2:3] offset:-128
+; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_max_u32_e32 v4, v5, v1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB19_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
+ %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
+ %unused = atomicrmw umax i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ ret void
+}
+
+define amdgpu_ps <2 x float> @global_umax_saddr_i64_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
+; GFX9-LABEL: global_umax_saddr_i64_rtn:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, s3
+; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB20_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v10, v4
+; GFX9-NEXT: v_mov_b32_e32 v9, v3
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[9:10], v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB20_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-NEXT: v_mov_b32_e32 v1, v4
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: global_umax_saddr_i64_rtn:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3]
+; GFX10-NEXT: v_add_co_u32 v5, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v6, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB20_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v10, v4
+; GFX10-NEXT: v_mov_b32_e32 v9, v3
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc, v[9:10], v[1:2]
+; GFX10-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB20_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-NEXT: v_mov_b32_e32 v1, v4
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: global_umax_saddr_i64_rtn:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b64 v[3:4], v0, s[2:3]
+; GFX11-NEXT: v_add_co_u32 v5, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v6, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB20_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v10, v4
+; GFX11-NEXT: v_mov_b32_e32 v9, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc, v[9:10], v[1:2]
+; GFX11-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB20_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
+ %rtn = atomicrmw umax i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps <2 x float> @global_umax_saddr_i64_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
+; GFX9-LABEL: global_umax_saddr_i64_rtn_neg128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] offset:-128
+; GFX9-NEXT: v_mov_b32_e32 v6, s3
+; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB21_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v10, v4
+; GFX9-NEXT: v_mov_b32_e32 v9, v3
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[9:10], v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB21_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-NEXT: v_mov_b32_e32 v1, v4
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: global_umax_saddr_i64_rtn_neg128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] offset:-128
+; GFX10-NEXT: v_add_co_u32 v5, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v6, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB21_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v10, v4
+; GFX10-NEXT: v_mov_b32_e32 v9, v3
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc, v[9:10], v[1:2]
+; GFX10-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB21_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-NEXT: v_mov_b32_e32 v1, v4
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: global_umax_saddr_i64_rtn_neg128:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b64 v[3:4], v0, s[2:3] offset:-128
+; GFX11-NEXT: v_add_co_u32 v5, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v6, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB21_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v10, v4
+; GFX11-NEXT: v_mov_b32_e32 v9, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc, v[9:10], v[1:2]
+; GFX11-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off offset:-128 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB21_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
+ %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
+ %rtn = atomicrmw umax i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps void @global_umax_saddr_i64_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
+; GFX9-LABEL: global_umax_saddr_i64_nortn:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB22_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[5:6], v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB22_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: global_umax_saddr_i64_nortn:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3]
+; GFX10-NEXT: v_add_co_u32 v7, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v8, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc, v[5:6], v[1:2]
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_mov_b32_e32 v5, v3
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB22_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_umax_saddr_i64_nortn:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b64 v[5:6], v0, s[2:3]
+; GFX11-NEXT: v_add_co_u32 v7, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB22_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc, v[5:6], v[1:2]
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_mov_b32_e32 v5, v3
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB22_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
+ %unused = atomicrmw umax i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ ret void
+}
+
+define amdgpu_ps void @global_umax_saddr_i64_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
+; GFX9-LABEL: global_umax_saddr_i64_nortn_neg128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] offset:-128
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB23_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[5:6], v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB23_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: global_umax_saddr_i64_nortn_neg128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] offset:-128
+; GFX10-NEXT: v_add_co_u32 v7, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v8, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc, v[5:6], v[1:2]
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_mov_b32_e32 v5, v3
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB23_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_umax_saddr_i64_nortn_neg128:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b64 v[5:6], v0, s[2:3] offset:-128
+; GFX11-NEXT: v_add_co_u32 v7, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_gt_u64_e32 vcc, v[5:6], v[1:2]
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off offset:-128 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_mov_b32_e32 v5, v3
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB23_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
+ %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
+ %unused = atomicrmw umax i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ ret void
+}
+
+; --------------------------------------------------------------------------------
+; atomicrmw umin
+; --------------------------------------------------------------------------------
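+;
+; A summary of the pattern the checks below encode (mirroring the umax tests
+; above), for the default system scope: the atomicrmw is not selected to a
+; hardware global_atomic_umin. Instead it is expanded to a plain global_load,
+; a v_min_u32 (or v_cmp_le_u64 + v_cndmask for i64) that computes the new
+; value, and a global_atomic_cmpswap retry loop that repeats until the
+; compare-exchange observes an unchanged old value.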
+
+define amdgpu_ps float @global_umin_saddr_i32_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
+; GFX9-LABEL: global_umin_saddr_i32_rtn:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: global_load_dword v0, v0, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_min_u32_e32 v4, v5, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB24_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: global_umin_saddr_i32_rtn:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_mov_b32_e32 v2, v0
+; GFX10-NEXT: global_load_dword v0, v0, s[2:3]
+; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: v_min_u32_e32 v4, v5, v1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB24_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: global_umin_saddr_i32_rtn:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v2, v0
+; GFX11-NEXT: global_load_b32 v0, v0, s[2:3]
+; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_u32_e32 v4, v5, v1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB24_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX11-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
+ %rtn = atomicrmw umin i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps float @global_umin_saddr_i32_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
+; GFX9-LABEL: global_umin_saddr_i32_rtn_neg128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: global_load_dword v0, v0, s[2:3] offset:-128
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_min_u32_e32 v4, v5, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB25_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: global_umin_saddr_i32_rtn_neg128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_mov_b32_e32 v2, v0
+; GFX10-NEXT: global_load_dword v0, v0, s[2:3] offset:-128
+; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v2
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: v_min_u32_e32 v4, v5, v1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB25_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: global_umin_saddr_i32_rtn_neg128:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v2, v0
+; GFX11-NEXT: global_load_b32 v0, v0, s[2:3] offset:-128
+; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_min_u32_e32 v4, v5, v1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB25_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX11-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
+ %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
+ %rtn = atomicrmw umin i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ %cast.rtn = bitcast i32 %rtn to float
+ ret float %cast.rtn
+}
+
+define amdgpu_ps void @global_umin_saddr_i32_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
+; GFX9-LABEL: global_umin_saddr_i32_nortn:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dword v5, v0, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_min_u32_e32 v4, v5, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB26_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: global_umin_saddr_i32_nortn:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dword v5, v0, s[2:3]
+; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_min_u32_e32 v4, v5, v1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB26_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_umin_saddr_i32_nortn:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b32 v5, v0, s[2:3]
+; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_min_u32_e32 v4, v5, v1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB26_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
+ %unused = atomicrmw umin i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ ret void
+}
+
+define amdgpu_ps void @global_umin_saddr_i32_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
+; GFX9-LABEL: global_umin_saddr_i32_nortn_neg128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dword v5, v0, s[2:3] offset:-128
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_min_u32_e32 v4, v5, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB27_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: global_umin_saddr_i32_nortn_neg128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dword v5, v0, s[2:3] offset:-128
+; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_min_u32_e32 v4, v5, v1
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB27_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_umin_saddr_i32_nortn_neg128:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b32 v5, v0, s[2:3] offset:-128
+; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_min_u32_e32 v4, v5, v1
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB27_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
+ %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
+ %unused = atomicrmw umin i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ ret void
+}
+
+define amdgpu_ps <2 x float> @global_umin_saddr_i64_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
+; GFX9-LABEL: global_umin_saddr_i64_rtn:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, s3
+; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v10, v4
+; GFX9-NEXT: v_mov_b32_e32 v9, v3
+; GFX9-NEXT: v_cmp_le_u64_e32 vcc, v[9:10], v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB28_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-NEXT: v_mov_b32_e32 v1, v4
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: global_umin_saddr_i64_rtn:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3]
+; GFX10-NEXT: v_add_co_u32 v5, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v6, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v10, v4
+; GFX10-NEXT: v_mov_b32_e32 v9, v3
+; GFX10-NEXT: v_cmp_le_u64_e32 vcc, v[9:10], v[1:2]
+; GFX10-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB28_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-NEXT: v_mov_b32_e32 v1, v4
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: global_umin_saddr_i64_rtn:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b64 v[3:4], v0, s[2:3]
+; GFX11-NEXT: v_add_co_u32 v5, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v6, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v10, v4
+; GFX11-NEXT: v_mov_b32_e32 v9, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_le_u64_e32 vcc, v[9:10], v[1:2]
+; GFX11-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB28_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
+ %rtn = atomicrmw umin i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps <2 x float> @global_umin_saddr_i64_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
+; GFX9-LABEL: global_umin_saddr_i64_rtn_neg128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] offset:-128
+; GFX9-NEXT: v_mov_b32_e32 v6, s3
+; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v10, v4
+; GFX9-NEXT: v_mov_b32_e32 v9, v3
+; GFX9-NEXT: v_cmp_le_u64_e32 vcc, v[9:10], v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB29_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-NEXT: v_mov_b32_e32 v1, v4
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: global_umin_saddr_i64_rtn_neg128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] offset:-128
+; GFX10-NEXT: v_add_co_u32 v5, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v6, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v10, v4
+; GFX10-NEXT: v_mov_b32_e32 v9, v3
+; GFX10-NEXT: v_cmp_le_u64_e32 vcc, v[9:10], v[1:2]
+; GFX10-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB29_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v0, v3
+; GFX10-NEXT: v_mov_b32_e32 v1, v4
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: global_umin_saddr_i64_rtn_neg128:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b64 v[3:4], v0, s[2:3] offset:-128
+; GFX11-NEXT: v_add_co_u32 v5, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v6, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v10, v4
+; GFX11-NEXT: v_mov_b32_e32 v9, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cmp_le_u64_e32 vcc, v[9:10], v[1:2]
+; GFX11-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc
+; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off offset:-128 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10]
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB29_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-NEXT: ; return to shader part epilog
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
+ %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
+ %rtn = atomicrmw umin i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ %cast.rtn = bitcast i64 %rtn to <2 x float>
+ ret <2 x float> %cast.rtn
+}
+
+define amdgpu_ps void @global_umin_saddr_i64_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
+; GFX9-LABEL: global_umin_saddr_i64_nortn:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_le_u64_e32 vcc, v[5:6], v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB30_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: global_umin_saddr_i64_nortn:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3]
+; GFX10-NEXT: v_add_co_u32 v7, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v8, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_cmp_le_u64_e32 vcc, v[5:6], v[1:2]
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_mov_b32_e32 v5, v3
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB30_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_umin_saddr_i64_nortn:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b64 v[5:6], v0, s[2:3]
+; GFX11-NEXT: v_add_co_u32 v7, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_le_u64_e32 vcc, v[5:6], v[1:2]
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_mov_b32_e32 v5, v3
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB30_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
+ %unused = atomicrmw umin i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ ret void
+}
+
+define amdgpu_ps void @global_umin_saddr_i64_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
+; GFX9-LABEL: global_umin_saddr_i64_nortn_neg128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] offset:-128
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, s2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_le_u64_e32 vcc, v[5:6], v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB31_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: global_umin_saddr_i64_nortn_neg128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] offset:-128
+; GFX10-NEXT: v_add_co_u32 v7, s[0:1], s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v8, s[0:1], s3, 0, s[0:1]
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_cmp_le_u64_e32 vcc, v[5:6], v[1:2]
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: buffer_gl0_inv
+; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_mov_b32_e32 v5, v3
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execnz .LBB31_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: global_umin_saddr_i64_nortn_neg128:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b64 v[5:6], v0, s[2:3] offset:-128
+; GFX11-NEXT: v_add_co_u32 v7, s[0:1], s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, s3, 0, s[0:1]
+; GFX11-NEXT: s_mov_b64 s[0:1], 0
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_le_u64_e32 vcc, v[5:6], v[1:2]
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off offset:-128 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: buffer_gl0_inv
+; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_mov_b32_e32 v5, v3
+; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_execnz .LBB31_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_endpgm
+ %zext.offset = zext i32 %voffset to i64
+ %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
+ %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
+ %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
+ %unused = atomicrmw umin i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ ret void
+}
+
+attributes #0 = { argmemonly nounwind willreturn }
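
; The pre-existing saddr min/max tests below keep the direct hardware
; instruction selection by switching the atomicrmw to syncscope("workgroup"):
; only the scope changes in the IR, and the now-unnecessary cache maintenance
; (buffer_wbinvl1 on GFX9, buffer_gl1_inv on GFX10/GFX11, plus the leading
; vmcnt(0) wait) is dropped from the expected output.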
define amdgpu_ps float @global_max_saddr_i32_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
; GFX9-LABEL: global_max_saddr_i32_rtn:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax v0, v0, v1, s[2:3] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_max_saddr_i32_rtn:
; GFX10-NEXT: global_atomic_smax v0, v0, v1, s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_max_saddr_i32_rtn:
; GFX11-NEXT: global_atomic_max_i32 v0, v0, v1, s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
- %rtn = atomicrmw max i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ %rtn = atomicrmw max i32 addrspace(1)* %cast.gep0, i32 %data syncscope("workgroup") seq_cst
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
define amdgpu_ps float @global_max_saddr_i32_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
; GFX9-LABEL: global_max_saddr_i32_rtn_neg128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax v0, v0, v1, s[2:3] offset:-128 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_max_saddr_i32_rtn_neg128:
; GFX10-NEXT: global_atomic_smax v0, v0, v1, s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_max_saddr_i32_rtn_neg128:
; GFX11-NEXT: global_atomic_max_i32 v0, v0, v1, s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
%cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
- %rtn = atomicrmw max i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ %rtn = atomicrmw max i32 addrspace(1)* %cast.gep1, i32 %data syncscope("workgroup") seq_cst
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
define amdgpu_ps void @global_max_saddr_i32_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
; GFX9-LABEL: global_max_saddr_i32_nortn:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax v0, v1, s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_max_saddr_i32_nortn:
; GFX10-NEXT: global_atomic_smax v0, v1, s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_max_saddr_i32_nortn:
; GFX11-NEXT: global_atomic_max_i32 v0, v1, s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
- %unused = atomicrmw max i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ %unused = atomicrmw max i32 addrspace(1)* %cast.gep0, i32 %data syncscope("workgroup") seq_cst
ret void
}
define amdgpu_ps void @global_max_saddr_i32_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
; GFX9-LABEL: global_max_saddr_i32_nortn_neg128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax v0, v1, s[2:3] offset:-128
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_max_saddr_i32_nortn_neg128:
; GFX10-NEXT: global_atomic_smax v0, v1, s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_max_saddr_i32_nortn_neg128:
; GFX11-NEXT: global_atomic_max_i32 v0, v1, s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
%cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
- %unused = atomicrmw max i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ %unused = atomicrmw max i32 addrspace(1)* %cast.gep1, i32 %data syncscope("workgroup") seq_cst
ret void
}
define amdgpu_ps <2 x float> @global_max_saddr_i64_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
; GFX9-LABEL: global_max_saddr_i64_rtn:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax_x2 v[0:1], v0, v[1:2], s[2:3] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_max_saddr_i64_rtn:
; GFX10-NEXT: global_atomic_smax_x2 v[0:1], v0, v[1:2], s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_max_saddr_i64_rtn:
; GFX11-NEXT: global_atomic_max_i64 v[0:1], v0, v[1:2], s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
- %rtn = atomicrmw max i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ %rtn = atomicrmw max i64 addrspace(1)* %cast.gep0, i64 %data syncscope("workgroup") seq_cst
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
define amdgpu_ps <2 x float> @global_max_saddr_i64_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
; GFX9-LABEL: global_max_saddr_i64_rtn_neg128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_max_saddr_i64_rtn_neg128:
; GFX10-NEXT: global_atomic_smax_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_max_saddr_i64_rtn_neg128:
; GFX11-NEXT: global_atomic_max_i64 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
%cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
- %rtn = atomicrmw max i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ %rtn = atomicrmw max i64 addrspace(1)* %cast.gep1, i64 %data syncscope("workgroup") seq_cst
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
define amdgpu_ps void @global_max_saddr_i64_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
; GFX9-LABEL: global_max_saddr_i64_nortn:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax_x2 v0, v[1:2], s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_max_saddr_i64_nortn:
; GFX10-NEXT: global_atomic_smax_x2 v0, v[1:2], s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_max_saddr_i64_nortn:
; GFX11-NEXT: global_atomic_max_i64 v0, v[1:2], s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
- %unused = atomicrmw max i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ %unused = atomicrmw max i64 addrspace(1)* %cast.gep0, i64 %data syncscope("workgroup") seq_cst
ret void
}
define amdgpu_ps void @global_max_saddr_i64_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
; GFX9-LABEL: global_max_saddr_i64_nortn_neg128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax_x2 v0, v[1:2], s[2:3] offset:-128
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_max_saddr_i64_nortn_neg128:
; GFX10-NEXT: global_atomic_smax_x2 v0, v[1:2], s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_max_saddr_i64_nortn_neg128:
; GFX11-NEXT: global_atomic_max_i64 v0, v[1:2], s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
%cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
- %unused = atomicrmw max i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ %unused = atomicrmw max i64 addrspace(1)* %cast.gep1, i64 %data syncscope("workgroup") seq_cst
ret void
}
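; Signed min, same shape as max: workgroup scope selects global_atomic_smin
; / global_atomic_smin_x2 (global_atomic_min_i32 / global_atomic_min_i64 on
; GFX11) with no L1 invalidation after the atomic.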
define amdgpu_ps float @global_min_saddr_i32_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
; GFX9-LABEL: global_min_saddr_i32_rtn:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin v0, v0, v1, s[2:3] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_min_saddr_i32_rtn:
; GFX10-NEXT: global_atomic_smin v0, v0, v1, s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_min_saddr_i32_rtn:
; GFX11-NEXT: global_atomic_min_i32 v0, v0, v1, s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
- %rtn = atomicrmw min i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ %rtn = atomicrmw min i32 addrspace(1)* %cast.gep0, i32 %data syncscope("workgroup") seq_cst
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
define amdgpu_ps float @global_min_saddr_i32_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
; GFX9-LABEL: global_min_saddr_i32_rtn_neg128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin v0, v0, v1, s[2:3] offset:-128 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_min_saddr_i32_rtn_neg128:
; GFX10-NEXT: global_atomic_smin v0, v0, v1, s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_min_saddr_i32_rtn_neg128:
; GFX11-NEXT: global_atomic_min_i32 v0, v0, v1, s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
%cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
- %rtn = atomicrmw min i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ %rtn = atomicrmw min i32 addrspace(1)* %cast.gep1, i32 %data syncscope("workgroup") seq_cst
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
define amdgpu_ps void @global_min_saddr_i32_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
; GFX9-LABEL: global_min_saddr_i32_nortn:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin v0, v1, s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_min_saddr_i32_nortn:
; GFX10-NEXT: global_atomic_smin v0, v1, s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_min_saddr_i32_nortn:
; GFX11-NEXT: global_atomic_min_i32 v0, v1, s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
- %unused = atomicrmw min i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ %unused = atomicrmw min i32 addrspace(1)* %cast.gep0, i32 %data syncscope("workgroup") seq_cst
ret void
}
define amdgpu_ps void @global_min_saddr_i32_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
; GFX9-LABEL: global_min_saddr_i32_nortn_neg128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin v0, v1, s[2:3] offset:-128
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_min_saddr_i32_nortn_neg128:
; GFX10-NEXT: global_atomic_smin v0, v1, s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_min_saddr_i32_nortn_neg128:
; GFX11-NEXT: global_atomic_min_i32 v0, v1, s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
%cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
- %unused = atomicrmw min i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ %unused = atomicrmw min i32 addrspace(1)* %cast.gep1, i32 %data syncscope("workgroup") seq_cst
ret void
}
define amdgpu_ps <2 x float> @global_min_saddr_i64_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
; GFX9-LABEL: global_min_saddr_i64_rtn:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin_x2 v[0:1], v0, v[1:2], s[2:3] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_min_saddr_i64_rtn:
; GFX10-NEXT: global_atomic_smin_x2 v[0:1], v0, v[1:2], s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_min_saddr_i64_rtn:
; GFX11-NEXT: global_atomic_min_i64 v[0:1], v0, v[1:2], s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
- %rtn = atomicrmw min i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ %rtn = atomicrmw min i64 addrspace(1)* %cast.gep0, i64 %data syncscope("workgroup") seq_cst
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
define amdgpu_ps <2 x float> @global_min_saddr_i64_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
; GFX9-LABEL: global_min_saddr_i64_rtn_neg128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_min_saddr_i64_rtn_neg128:
; GFX10-NEXT: global_atomic_smin_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_min_saddr_i64_rtn_neg128:
; GFX11-NEXT: global_atomic_min_i64 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
%cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
- %rtn = atomicrmw min i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ %rtn = atomicrmw min i64 addrspace(1)* %cast.gep1, i64 %data syncscope("workgroup") seq_cst
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
define amdgpu_ps void @global_min_saddr_i64_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
; GFX9-LABEL: global_min_saddr_i64_nortn:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin_x2 v0, v[1:2], s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_min_saddr_i64_nortn:
; GFX10-NEXT: global_atomic_smin_x2 v0, v[1:2], s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_min_saddr_i64_nortn:
; GFX11-NEXT: global_atomic_min_i64 v0, v[1:2], s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
- %unused = atomicrmw min i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ %unused = atomicrmw min i64 addrspace(1)* %cast.gep0, i64 %data syncscope("workgroup") seq_cst
ret void
}
define amdgpu_ps void @global_min_saddr_i64_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
; GFX9-LABEL: global_min_saddr_i64_nortn_neg128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin_x2 v0, v[1:2], s[2:3] offset:-128
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_min_saddr_i64_nortn_neg128:
; GFX10-NEXT: global_atomic_smin_x2 v0, v[1:2], s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_min_saddr_i64_nortn_neg128:
; GFX11-NEXT: global_atomic_min_i64 v0, v[1:2], s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
%cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
- %unused = atomicrmw min i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ %unused = atomicrmw min i64 addrspace(1)* %cast.gep1, i64 %data syncscope("workgroup") seq_cst
ret void
}
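; Unsigned max: global_atomic_umax / global_atomic_umax_x2
; (global_atomic_max_u32 / global_atomic_max_u64 on GFX11), again without
; the system-scope cache maintenance.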
define amdgpu_ps float @global_umax_saddr_i32_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
; GFX9-LABEL: global_umax_saddr_i32_rtn:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax v0, v0, v1, s[2:3] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_umax_saddr_i32_rtn:
; GFX10-NEXT: global_atomic_umax v0, v0, v1, s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_umax_saddr_i32_rtn:
; GFX11-NEXT: global_atomic_max_u32 v0, v0, v1, s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
- %rtn = atomicrmw umax i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ %rtn = atomicrmw umax i32 addrspace(1)* %cast.gep0, i32 %data syncscope("workgroup") seq_cst
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
define amdgpu_ps float @global_umax_saddr_i32_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
; GFX9-LABEL: global_umax_saddr_i32_rtn_neg128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax v0, v0, v1, s[2:3] offset:-128 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_umax_saddr_i32_rtn_neg128:
; GFX10-NEXT: global_atomic_umax v0, v0, v1, s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_umax_saddr_i32_rtn_neg128:
; GFX11-NEXT: global_atomic_max_u32 v0, v0, v1, s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
%cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
- %rtn = atomicrmw umax i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ %rtn = atomicrmw umax i32 addrspace(1)* %cast.gep1, i32 %data syncscope("workgroup") seq_cst
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
define amdgpu_ps void @global_umax_saddr_i32_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
; GFX9-LABEL: global_umax_saddr_i32_nortn:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax v0, v1, s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_umax_saddr_i32_nortn:
; GFX10-NEXT: global_atomic_umax v0, v1, s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_umax_saddr_i32_nortn:
; GFX11-NEXT: global_atomic_max_u32 v0, v1, s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
- %unused = atomicrmw umax i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ %unused = atomicrmw umax i32 addrspace(1)* %cast.gep0, i32 %data syncscope("workgroup") seq_cst
ret void
}
define amdgpu_ps void @global_umax_saddr_i32_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
; GFX9-LABEL: global_umax_saddr_i32_nortn_neg128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax v0, v1, s[2:3] offset:-128
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_umax_saddr_i32_nortn_neg128:
; GFX10-NEXT: global_atomic_umax v0, v1, s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_umax_saddr_i32_nortn_neg128:
; GFX11-NEXT: global_atomic_max_u32 v0, v1, s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
%cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
- %unused = atomicrmw umax i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ %unused = atomicrmw umax i32 addrspace(1)* %cast.gep1, i32 %data syncscope("workgroup") seq_cst
ret void
}
define amdgpu_ps <2 x float> @global_umax_saddr_i64_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
; GFX9-LABEL: global_umax_saddr_i64_rtn:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax_x2 v[0:1], v0, v[1:2], s[2:3] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_umax_saddr_i64_rtn:
; GFX10-NEXT: global_atomic_umax_x2 v[0:1], v0, v[1:2], s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_umax_saddr_i64_rtn:
; GFX11-NEXT: global_atomic_max_u64 v[0:1], v0, v[1:2], s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
- %rtn = atomicrmw umax i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ %rtn = atomicrmw umax i64 addrspace(1)* %cast.gep0, i64 %data syncscope("workgroup") seq_cst
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
define amdgpu_ps <2 x float> @global_umax_saddr_i64_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
; GFX9-LABEL: global_umax_saddr_i64_rtn_neg128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_umax_saddr_i64_rtn_neg128:
; GFX10-NEXT: global_atomic_umax_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_umax_saddr_i64_rtn_neg128:
; GFX11-NEXT: global_atomic_max_u64 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
%cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
- %rtn = atomicrmw umax i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ %rtn = atomicrmw umax i64 addrspace(1)* %cast.gep1, i64 %data syncscope("workgroup") seq_cst
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
define amdgpu_ps void @global_umax_saddr_i64_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
; GFX9-LABEL: global_umax_saddr_i64_nortn:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax_x2 v0, v[1:2], s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_umax_saddr_i64_nortn:
; GFX10-NEXT: global_atomic_umax_x2 v0, v[1:2], s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_umax_saddr_i64_nortn:
; GFX11-NEXT: global_atomic_max_u64 v0, v[1:2], s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
- %unused = atomicrmw umax i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ %unused = atomicrmw umax i64 addrspace(1)* %cast.gep0, i64 %data syncscope("workgroup") seq_cst
ret void
}
define amdgpu_ps void @global_umax_saddr_i64_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
; GFX9-LABEL: global_umax_saddr_i64_nortn_neg128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax_x2 v0, v[1:2], s[2:3] offset:-128
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_umax_saddr_i64_nortn_neg128:
; GFX10-NEXT: global_atomic_umax_x2 v0, v[1:2], s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_umax_saddr_i64_nortn_neg128:
; GFX11-NEXT: global_atomic_max_u64 v0, v[1:2], s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
%cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
- %unused = atomicrmw umax i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ %unused = atomicrmw umax i64 addrspace(1)* %cast.gep1, i64 %data syncscope("workgroup") seq_cst
ret void
}
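; Unsigned min: global_atomic_umin / global_atomic_umin_x2
; (global_atomic_min_u32 / global_atomic_min_u64 on GFX11).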
define amdgpu_ps float @global_umin_saddr_i32_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
; GFX9-LABEL: global_umin_saddr_i32_rtn:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin v0, v0, v1, s[2:3] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_umin_saddr_i32_rtn:
; GFX10-NEXT: global_atomic_umin v0, v0, v1, s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_umin_saddr_i32_rtn:
; GFX11-NEXT: global_atomic_min_u32 v0, v0, v1, s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
- %rtn = atomicrmw umin i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ %rtn = atomicrmw umin i32 addrspace(1)* %cast.gep0, i32 %data syncscope("workgroup") seq_cst
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
define amdgpu_ps float @global_umin_saddr_i32_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
; GFX9-LABEL: global_umin_saddr_i32_rtn_neg128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin v0, v0, v1, s[2:3] offset:-128 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_umin_saddr_i32_rtn_neg128:
; GFX10-NEXT: global_atomic_umin v0, v0, v1, s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_umin_saddr_i32_rtn_neg128:
; GFX11-NEXT: global_atomic_min_u32 v0, v0, v1, s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
%cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
- %rtn = atomicrmw umin i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ %rtn = atomicrmw umin i32 addrspace(1)* %cast.gep1, i32 %data syncscope("workgroup") seq_cst
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
define amdgpu_ps void @global_umin_saddr_i32_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
; GFX9-LABEL: global_umin_saddr_i32_nortn:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin v0, v1, s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_umin_saddr_i32_nortn:
; GFX10-NEXT: global_atomic_umin v0, v1, s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_umin_saddr_i32_nortn:
; GFX11-NEXT: global_atomic_min_u32 v0, v1, s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
- %unused = atomicrmw umin i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ %unused = atomicrmw umin i32 addrspace(1)* %cast.gep0, i32 %data syncscope("workgroup") seq_cst
ret void
}
define amdgpu_ps void @global_umin_saddr_i32_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
; GFX9-LABEL: global_umin_saddr_i32_nortn_neg128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin v0, v1, s[2:3] offset:-128
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_umin_saddr_i32_nortn_neg128:
; GFX10-NEXT: global_atomic_umin v0, v1, s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_umin_saddr_i32_nortn_neg128:
; GFX11-NEXT: global_atomic_min_u32 v0, v1, s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
%cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
- %unused = atomicrmw umin i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ %unused = atomicrmw umin i32 addrspace(1)* %cast.gep1, i32 %data syncscope("workgroup") seq_cst
ret void
}
define amdgpu_ps <2 x float> @global_umin_saddr_i64_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
; GFX9-LABEL: global_umin_saddr_i64_rtn:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin_x2 v[0:1], v0, v[1:2], s[2:3] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_umin_saddr_i64_rtn:
; GFX10-NEXT: global_atomic_umin_x2 v[0:1], v0, v[1:2], s[2:3] glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_umin_saddr_i64_rtn:
; GFX11-NEXT: global_atomic_min_u64 v[0:1], v0, v[1:2], s[2:3] glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
- %rtn = atomicrmw umin i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ %rtn = atomicrmw umin i64 addrspace(1)* %cast.gep0, i64 %data syncscope("workgroup") seq_cst
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
define amdgpu_ps <2 x float> @global_umin_saddr_i64_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
; GFX9-LABEL: global_umin_saddr_i64_rtn_neg128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_umin_saddr_i64_rtn_neg128:
; GFX10-NEXT: global_atomic_umin_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_umin_saddr_i64_rtn_neg128:
; GFX11-NEXT: global_atomic_min_u64 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
%cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
- %rtn = atomicrmw umin i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ %rtn = atomicrmw umin i64 addrspace(1)* %cast.gep1, i64 %data syncscope("workgroup") seq_cst
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
define amdgpu_ps void @global_umin_saddr_i64_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
; GFX9-LABEL: global_umin_saddr_i64_nortn:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin_x2 v0, v[1:2], s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_umin_saddr_i64_nortn:
; GFX10-NEXT: global_atomic_umin_x2 v0, v[1:2], s[2:3]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_umin_saddr_i64_nortn:
; GFX11-NEXT: global_atomic_min_u64 v0, v[1:2], s[2:3]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
- %unused = atomicrmw umin i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ %unused = atomicrmw umin i64 addrspace(1)* %cast.gep0, i64 %data syncscope("workgroup") seq_cst
ret void
}
define amdgpu_ps void @global_umin_saddr_i64_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
; GFX9-LABEL: global_umin_saddr_i64_nortn_neg128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin_x2 v0, v[1:2], s[2:3] offset:-128
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_umin_saddr_i64_nortn_neg128:
; GFX10-NEXT: global_atomic_umin_x2 v0, v[1:2], s[2:3] offset:-128
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_umin_saddr_i64_nortn_neg128:
; GFX11-NEXT: global_atomic_min_u64 v0, v[1:2], s[2:3] offset:-128
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
%cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
- %unused = atomicrmw umin i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ %unused = atomicrmw umin i64 addrspace(1)* %cast.gep1, i64 %data syncscope("workgroup") seq_cst
ret void
}
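; The kernels below exercise the buffer/flat forms. @atomic_max_i32_offset
; keeps the default (system) scope, so the max is now expanded into a
; compare-exchange loop (%atomicrmw.start with buffer_atomic_cmpswap /
; global_atomic_cmpswap below) instead of a single buffer_atomic_smax. The
; remaining kernels switch to syncscope("workgroup") and keep the native
; atomic instruction without buffer_wbinvl1 / buffer_wbinvl1_vol.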
define amdgpu_kernel void @atomic_max_i32_offset(i32 addrspace(1)* %out, i32 %in) {
; SI-LABEL: atomic_max_i32_offset:
; SI: ; %bb.0: ; %entry
-; SI-NEXT: s_load_dword s4, s[0:1], 0xb
-; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
-; SI-NEXT: s_mov_b32 s3, 0xf000
-; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s2, s[0:1], 0xb
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: s_load_dword s3, s[4:5], 0x4
+; SI-NEXT: s_mov_b64 s[0:1], 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: .LBB27_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s2, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0 offset:16
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT: s_cbranch_execnz .LBB27_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i32_offset:
; VI: ; %bb.0: ; %entry
-; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
-; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: s_mov_b32 s3, 0xf000
-; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: s_load_dword s2, s[8:9], 0x10
+; VI-NEXT: s_add_u32 s0, s8, 16
+; VI-NEXT: s_addc_u32 s1, s9, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB27_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0 offset:16
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB27_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i32_offset:
; GFX9: ; %bb.0: ; %entry
-; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: s_load_dword s5, s[2:3], 0x10
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_i32_e32 v0, s4, v1
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_atomic_smax v0, v1, s[2:3] offset:16
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB27_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_smax v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-NEXT: s_mov_b32 s6, s2
; VI-NEXT: s_mov_b32 s7, s3
; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_smax v0, off, s[4:7], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax v1, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
- %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32 addrspace(1)* %out2
ret void
}
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_smax v2, v[0:1], s[0:3], 0 addr64 offset:16
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i32_addr64_offset:
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_smax v[0:1], v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i32_addr64_offset:
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax v0, v1, s[0:1] offset:16
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
- %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_smax v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_smax v0, v[0:1], v2 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax v1, v0, v1, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
- %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32 addrspace(1)* %out2
ret void
}
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i32:
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i32:
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax v0, v1, s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in syncscope("workgroup") seq_cst
ret void
}
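; In the _ret variants that follow, the only vm wait left is the vmcnt(0)
; immediately before the store of the returned value; with the invalidation
; removed, that wait sinks past the intervening scalar moves.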
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax v1, v0, v1, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32 addrspace(1)* %out2
ret void
}
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_smax v2, v[0:1], s[0:3], 0 addr64
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i32_addr64:
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_smax v[0:1], v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i32_addr64:
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax v0, v1, s[0:1]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in syncscope("workgroup") seq_cst
ret void
}
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_smax v2, v[0:1], s[4:7], 0 addr64 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_smax v0, v[0:1], v2 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax v1, v0, v1, s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32 addrspace(1)* %out2
ret void
}
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0 offset:16
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i32_offset:
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0 offset:16
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i32_offset:
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax v0, v1, s[2:3] offset:16
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
- %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_umax v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-NEXT: s_mov_b32 s6, s2
; VI-NEXT: s_mov_b32 s7, s3
; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_umax v0, off, s[4:7], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax v1, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
- %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32 addrspace(1)* %out2
ret void
}
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_umax v2, v[0:1], s[0:3], 0 addr64 offset:16
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i32_addr64_offset:
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_umax v[0:1], v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i32_addr64_offset:
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax v0, v1, s[0:1] offset:16
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
- %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_umax v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_umax v0, v[0:1], v2 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax v1, v0, v1, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
- %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32 addrspace(1)* %out2
ret void
}
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i32:
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i32:
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax v0, v1, s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in syncscope("workgroup") seq_cst
ret void
}
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax v1, v0, v1, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32 addrspace(1)* %out2
ret void
}
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_umax v2, v[0:1], s[0:3], 0 addr64
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i32_addr64:
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_umax v[0:1], v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i32_addr64:
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax v0, v1, s[0:1]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in syncscope("workgroup") seq_cst
ret void
}
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_umax v2, v[0:1], s[4:7], 0 addr64 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_umax v0, v[0:1], v2 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax v1, v0, v1, s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32 addrspace(1)* %out2
ret void
}
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0 offset:16
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i32_offset:
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0 offset:16
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i32_offset:
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin v0, v1, s[2:3] offset:16
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
- %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_smin v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-NEXT: s_mov_b32 s6, s2
; VI-NEXT: s_mov_b32 s7, s3
; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_smin v0, off, s[4:7], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin v1, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
- %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32 addrspace(1)* %out2
ret void
}
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_smin v2, v[0:1], s[0:3], 0 addr64 offset:16
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i32_addr64_offset:
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_smin v[0:1], v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i32_addr64_offset:
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin v0, v1, s[0:1] offset:16
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
- %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_smin v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_smin v0, v[0:1], v2 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin v1, v0, v1, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
- %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32 addrspace(1)* %out2
ret void
}
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i32:
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i32:
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin v0, v1, s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in syncscope("workgroup") seq_cst
ret void
}
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin v1, v0, v1, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32 addrspace(1)* %out2
ret void
}
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_smin v2, v[0:1], s[0:3], 0 addr64
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i32_addr64:
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_smin v[0:1], v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i32_addr64:
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin v0, v1, s[0:1]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in syncscope("workgroup") seq_cst
ret void
}
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_smin v2, v[0:1], s[4:7], 0 addr64 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_smin v0, v[0:1], v2 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin v1, v0, v1, s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32 addrspace(1)* %out2
ret void
}
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0 offset:16
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umin_i32_offset:
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0 offset:16
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umin_i32_offset:
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin v0, v1, s[2:3] offset:16
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
- %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_umin v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-NEXT: s_mov_b32 s6, s2
; VI-NEXT: s_mov_b32 s7, s3
; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_umin v0, off, s[4:7], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin v1, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
- %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32 addrspace(1)* %out2
ret void
}
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_umin v2, v[0:1], s[0:3], 0 addr64 offset:16
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umin_i32_addr64_offset:
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_umin v[0:1], v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umin_i32_addr64_offset:
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin v0, v1, s[0:1] offset:16
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
- %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_umin v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_umin v0, v[0:1], v2 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin v1, v0, v1, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
- %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32 addrspace(1)* %out2
ret void
}
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umin_i32:
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umin_i32:
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin v0, v1, s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in syncscope("workgroup") seq_cst
ret void
}
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin v1, v0, v1, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32 addrspace(1)* %out2
ret void
}
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_umin v2, v[0:1], s[0:3], 0 addr64
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umin_i32_addr64:
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_umin v[0:1], v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umin_i32_addr64:
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin v0, v1, s[0:1]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in syncscope("workgroup") seq_cst
ret void
}
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_atomic_umin v2, v[0:1], s[4:7], 0 addr64 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_umin v0, v[0:1], v2 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin v1, v0, v1, s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in syncscope("workgroup") seq_cst
store i32 %val, i32 addrspace(1)* %out2
ret void
}
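; The i64 tests below show the identical change for the two-dword forms
; (buffer_atomic_*_x2, flat_atomic_*_x2, global_atomic_*_x2); the waits
; and cache invalidates are relaxed exactly as for the 32-bit atomics
; above.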
; CI-NEXT: v_mov_b32_e32 v1, s3
; CI-NEXT: s_mov_b32 s3, 0xf000
; CI-NEXT: s_mov_b32 s2, -1
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_smax_x2 v[0:1], off, s[0:3], 0 offset:32
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i64_offset:
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_smax_x2 v[0:1], off, s[0:3], 0 offset:32
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i64_offset:
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax_x2 v2, v[0:1], s[0:1] offset:32
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
- %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst
ret void
}
; CI-NEXT: v_mov_b32_e32 v1, s9
; CI-NEXT: s_mov_b32 s6, s2
; CI-NEXT: s_mov_b32 s7, s3
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_smax_x2 v[0:1], off, s[4:7], 0 offset:32 glc
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; VI-NEXT: v_mov_b32_e32 v1, s9
; VI-NEXT: s_mov_b32 s6, s2
; VI-NEXT: s_mov_b32 s7, s3
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_smax_x2 v[0:1], off, s[4:7], 0 offset:32 glc
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
- %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; CI-NEXT: s_mov_b32 s7, 0xf000
; CI-NEXT: s_mov_b32 s6, 0
; CI-NEXT: v_mov_b32_e32 v2, s0
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_smax_x2 v[0:1], v[2:3], s[4:7], 0 addr64 offset:32
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i64_addr64_offset:
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v2, s0
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i64_addr64_offset:
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: s_addc_u32 s1, s5, s1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax_x2 v2, v[0:1], s[0:1] offset:32
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
- %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst
ret void
}
; CI-NEXT: s_mov_b32 s2, 0
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: v_mov_b32_e32 v3, s5
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_smax_x2 v[0:1], v[2:3], s[0:3], 0 addr64 offset:32 glc
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax_x2 v[0:1], v2, v[0:1], s[0:1] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
- %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; CI-NEXT: s_mov_b32 s5, s1
; CI-NEXT: v_mov_b32_e32 v0, s2
; CI-NEXT: v_mov_b32_e32 v1, s3
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_smax_x2 v[0:1], off, s[4:7], 0
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i64:
; VI-NEXT: s_mov_b32 s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_smax_x2 v[0:1], off, s[4:7], 0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i64:
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax_x2 v2, v[0:1], s[0:1]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in syncscope("workgroup") seq_cst
ret void
}
; CI-NEXT: s_mov_b32 s1, s5
; CI-NEXT: v_mov_b32_e32 v0, s8
; CI-NEXT: v_mov_b32_e32 v1, s9
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_smax_x2 v[0:1], off, s[0:3], 0 glc
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_mov_b32 s0, s6
; CI-NEXT: s_mov_b32 s1, s7
+; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: v_mov_b32_e32 v1, s9
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_smax_x2 v[0:1], off, s[0:3], 0 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax_x2 v[0:1], v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX9-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; CI-NEXT: s_mov_b32 s7, 0xf000
; CI-NEXT: s_mov_b32 s6, 0
; CI-NEXT: v_mov_b32_e32 v2, s0
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_smax_x2 v[0:1], v[2:3], s[4:7], 0 addr64
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i64_addr64:
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v2, s0
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i64_addr64:
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: s_addc_u32 s1, s5, s1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax_x2 v2, v[0:1], s[0:1]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
- %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in syncscope("workgroup") seq_cst
ret void
}
; CI-NEXT: s_mov_b32 s2, 0
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: v_mov_b32_e32 v3, s5
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_smax_x2 v[0:1], v[2:3], s[0:3], 0 addr64 glc
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-NEXT: s_addc_u32 s1, s1, s5
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smax_x2 v[0:1], v2, v[0:1], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
- %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; CI-NEXT: v_mov_b32_e32 v1, s3
; CI-NEXT: s_mov_b32 s3, 0xf000
; CI-NEXT: s_mov_b32 s2, -1
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_umax_x2 v[0:1], off, s[0:3], 0 offset:32
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i64_offset:
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_umax_x2 v[0:1], off, s[0:3], 0 offset:32
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i64_offset:
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax_x2 v2, v[0:1], s[0:1] offset:32
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
- %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst
ret void
}
; CI-NEXT: v_mov_b32_e32 v1, s9
; CI-NEXT: s_mov_b32 s6, s2
; CI-NEXT: s_mov_b32 s7, s3
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_umax_x2 v[0:1], off, s[4:7], 0 offset:32 glc
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; VI-NEXT: v_mov_b32_e32 v1, s9
; VI-NEXT: s_mov_b32 s6, s2
; VI-NEXT: s_mov_b32 s7, s3
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_umax_x2 v[0:1], off, s[4:7], 0 offset:32 glc
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
- %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; CI-NEXT: s_mov_b32 s7, 0xf000
; CI-NEXT: s_mov_b32 s6, 0
; CI-NEXT: v_mov_b32_e32 v2, s0
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_umax_x2 v[0:1], v[2:3], s[4:7], 0 addr64 offset:32
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i64_addr64_offset:
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v2, s0
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i64_addr64_offset:
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: s_addc_u32 s1, s5, s1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax_x2 v2, v[0:1], s[0:1] offset:32
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
- %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst
ret void
}
; CI-NEXT: s_mov_b32 s2, 0
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: v_mov_b32_e32 v3, s5
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_umax_x2 v[0:1], v[2:3], s[0:3], 0 addr64 offset:32 glc
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax_x2 v[0:1], v2, v[0:1], s[0:1] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
- %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; CI-NEXT: s_mov_b32 s5, s1
; CI-NEXT: v_mov_b32_e32 v0, s2
; CI-NEXT: v_mov_b32_e32 v1, s3
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_umax_x2 v[0:1], off, s[4:7], 0
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i64:
; VI-NEXT: s_mov_b32 s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_umax_x2 v[0:1], off, s[4:7], 0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i64:
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax_x2 v2, v[0:1], s[0:1]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in syncscope("workgroup") seq_cst
ret void
}
; CI-NEXT: s_mov_b32 s1, s5
; CI-NEXT: v_mov_b32_e32 v0, s8
; CI-NEXT: v_mov_b32_e32 v1, s9
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_umax_x2 v[0:1], off, s[0:3], 0 glc
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_mov_b32 s0, s6
; CI-NEXT: s_mov_b32 s1, s7
+; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: v_mov_b32_e32 v1, s9
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_umax_x2 v[0:1], off, s[0:3], 0 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax_x2 v[0:1], v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX9-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; CI-NEXT: s_mov_b32 s7, 0xf000
; CI-NEXT: s_mov_b32 s6, 0
; CI-NEXT: v_mov_b32_e32 v2, s0
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_umax_x2 v[0:1], v[2:3], s[4:7], 0 addr64
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i64_addr64:
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v2, s0
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i64_addr64:
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: s_addc_u32 s1, s5, s1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax_x2 v2, v[0:1], s[0:1]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
- %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in syncscope("workgroup") seq_cst
ret void
}
; CI-NEXT: s_mov_b32 s2, 0
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: v_mov_b32_e32 v3, s5
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_umax_x2 v[0:1], v[2:3], s[0:3], 0 addr64 glc
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-NEXT: s_addc_u32 s1, s1, s5
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umax_x2 v[0:1], v2, v[0:1], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
- %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; CI-NEXT: v_mov_b32_e32 v1, s3
; CI-NEXT: s_mov_b32 s3, 0xf000
; CI-NEXT: s_mov_b32 s2, -1
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_smin_x2 v[0:1], off, s[0:3], 0 offset:32
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i64_offset:
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_smin_x2 v[0:1], off, s[0:3], 0 offset:32
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i64_offset:
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin_x2 v2, v[0:1], s[0:1] offset:32
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
- %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst
ret void
}
; CI-NEXT: v_mov_b32_e32 v1, s9
; CI-NEXT: s_mov_b32 s6, s2
; CI-NEXT: s_mov_b32 s7, s3
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_smin_x2 v[0:1], off, s[4:7], 0 offset:32 glc
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; VI-NEXT: v_mov_b32_e32 v1, s9
; VI-NEXT: s_mov_b32 s6, s2
; VI-NEXT: s_mov_b32 s7, s3
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_smin_x2 v[0:1], off, s[4:7], 0 offset:32 glc
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
- %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; CI-NEXT: s_mov_b32 s7, 0xf000
; CI-NEXT: s_mov_b32 s6, 0
; CI-NEXT: v_mov_b32_e32 v2, s0
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_smin_x2 v[0:1], v[2:3], s[4:7], 0 addr64 offset:32
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i64_addr64_offset:
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v2, s0
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i64_addr64_offset:
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: s_addc_u32 s1, s5, s1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin_x2 v2, v[0:1], s[0:1] offset:32
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
- %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst
ret void
}
; CI-NEXT: s_mov_b32 s2, 0
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: v_mov_b32_e32 v3, s5
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_smin_x2 v[0:1], v[2:3], s[0:3], 0 addr64 offset:32 glc
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin_x2 v[0:1], v2, v[0:1], s[0:1] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
- %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; CI-NEXT: s_mov_b32 s5, s1
; CI-NEXT: v_mov_b32_e32 v0, s2
; CI-NEXT: v_mov_b32_e32 v1, s3
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_smin_x2 v[0:1], off, s[4:7], 0
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i64:
; VI-NEXT: s_mov_b32 s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_smin_x2 v[0:1], off, s[4:7], 0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i64:
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin_x2 v2, v[0:1], s[0:1]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in syncscope("workgroup") seq_cst
ret void
}
; CI-NEXT: s_mov_b32 s1, s5
; CI-NEXT: v_mov_b32_e32 v0, s8
; CI-NEXT: v_mov_b32_e32 v1, s9
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_smin_x2 v[0:1], off, s[0:3], 0 glc
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_mov_b32 s0, s6
; CI-NEXT: s_mov_b32 s1, s7
+; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: v_mov_b32_e32 v1, s9
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_smin_x2 v[0:1], off, s[0:3], 0 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin_x2 v[0:1], v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX9-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; CI-NEXT: s_mov_b32 s7, 0xf000
; CI-NEXT: s_mov_b32 s6, 0
; CI-NEXT: v_mov_b32_e32 v2, s0
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_smin_x2 v[0:1], v[2:3], s[4:7], 0 addr64
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i64_addr64:
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v2, s0
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i64_addr64:
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: s_addc_u32 s1, s5, s1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin_x2 v2, v[0:1], s[0:1]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
- %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in syncscope("workgroup") seq_cst
ret void
}
; CI-NEXT: s_mov_b32 s2, 0
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: v_mov_b32_e32 v3, s5
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_smin_x2 v[0:1], v[2:3], s[0:3], 0 addr64 glc
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-NEXT: s_addc_u32 s1, s1, s5
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_smin_x2 v[0:1], v2, v[0:1], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
- %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; CI-NEXT: v_mov_b32_e32 v1, s3
; CI-NEXT: s_mov_b32 s3, 0xf000
; CI-NEXT: s_mov_b32 s2, -1
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_umin_x2 v[0:1], off, s[0:3], 0 offset:32
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umin_i64_offset:
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_umin_x2 v[0:1], off, s[0:3], 0 offset:32
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umin_i64_offset:
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin_x2 v2, v[0:1], s[0:1] offset:32
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
- %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst
ret void
}
; CI-NEXT: v_mov_b32_e32 v1, s9
; CI-NEXT: s_mov_b32 s6, s2
; CI-NEXT: s_mov_b32 s7, s3
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_umin_x2 v[0:1], off, s[4:7], 0 offset:32 glc
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; VI-NEXT: v_mov_b32_e32 v1, s9
; VI-NEXT: s_mov_b32 s6, s2
; VI-NEXT: s_mov_b32 s7, s3
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_umin_x2 v[0:1], off, s[4:7], 0 offset:32 glc
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
- %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; CI-NEXT: s_mov_b32 s7, 0xf000
; CI-NEXT: s_mov_b32 s6, 0
; CI-NEXT: v_mov_b32_e32 v2, s0
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_umin_x2 v[0:1], v[2:3], s[4:7], 0 addr64 offset:32
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umin_i64_addr64_offset:
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v2, s0
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umin_i64_addr64_offset:
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: s_addc_u32 s1, s5, s1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin_x2 v2, v[0:1], s[0:1] offset:32
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
- %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst
ret void
}
; CI-NEXT: s_mov_b32 s2, 0
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: v_mov_b32_e32 v3, s5
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_umin_x2 v[0:1], v[2:3], s[0:3], 0 addr64 offset:32 glc
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin_x2 v[0:1], v2, v[0:1], s[0:1] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
%gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
- %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; CI-NEXT: s_mov_b32 s5, s1
; CI-NEXT: v_mov_b32_e32 v0, s2
; CI-NEXT: v_mov_b32_e32 v1, s3
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_umin_x2 v[0:1], off, s[4:7], 0
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umin_i64:
; VI-NEXT: s_mov_b32 s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_umin_x2 v[0:1], off, s[4:7], 0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umin_i64:
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin_x2 v2, v[0:1], s[0:1]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in syncscope("workgroup") seq_cst
ret void
}
; CI-NEXT: s_mov_b32 s1, s5
; CI-NEXT: v_mov_b32_e32 v0, s8
; CI-NEXT: v_mov_b32_e32 v1, s9
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_umin_x2 v[0:1], off, s[0:3], 0 glc
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_mov_b32 s0, s6
; CI-NEXT: s_mov_b32 s1, s7
+; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: v_mov_b32_e32 v1, s9
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_atomic_umin_x2 v[0:1], off, s[0:3], 0 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin_x2 v[0:1], v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX9-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
; CI-NEXT: s_mov_b32 s7, 0xf000
; CI-NEXT: s_mov_b32 s6, 0
; CI-NEXT: v_mov_b32_e32 v2, s0
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_umin_x2 v[0:1], v[2:3], s[4:7], 0 addr64
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umin_i64_addr64:
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v2, s0
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umin_i64_addr64:
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: s_addc_u32 s1, s5, s1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin_x2 v2, v[0:1], s[0:1]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
- %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in syncscope("workgroup") seq_cst
ret void
}
; CI-NEXT: s_mov_b32 s2, 0
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: v_mov_b32_e32 v3, s5
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_atomic_umin_x2 v[0:1], v[2:3], s[0:3], 0 addr64 glc
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-NEXT: s_addc_u32 s1, s1, s5
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_atomic_umin_x2 v[0:1], v2, v[0:1], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
- %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in syncscope("workgroup") seq_cst
store i64 %tmp0, i64 addrspace(1)* %out2
ret void
}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=bonaire -amdgpu-atomic-optimizations=false -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=CI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-atomic-optimizations=false -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-atomic-optimizations=false -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s
+
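+; The i64 max/umax atomics in this file carry no syncscope, i.e. system scope,
+; and are expanded to cmpxchg loops: the checks expect a
+; buffer_atomic_cmpswap_x2 / global_atomic_cmpswap_x2 inside an
+; %atomicrmw.start loop, with full s_waitcnt and buffer_wbinvl1_vol, rather
+; than the direct hardware min/max atomics emitted at workgroup scope.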
+define amdgpu_kernel void @atomic_max_i64_offset(i64 addrspace(1)* %out, i64 %in) {
+; CI-LABEL: atomic_max_i64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b64 s[4:5], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x8
+; CI-NEXT: s_mov_b32 s6, s2
+; CI-NEXT: s_mov_b32 s7, s3
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB0_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s7
+; CI-NEXT: v_mov_b32_e32 v4, s6
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; CI-NEXT: s_cbranch_execnz .LBB0_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[10:11], s[0:1], 0x20
+; VI-NEXT: s_add_u32 s4, s0, 32
+; VI-NEXT: s_addc_u32 s5, s1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s10
+; VI-NEXT: v_mov_b32_e32 v3, s11
+; VI-NEXT: .LBB0_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; VI-NEXT: s_cbranch_execnz .LBB0_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x20
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, s7
+; GFX9-NEXT: .LBB0_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v5, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB0_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
+ ret void
+}
+
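+; The _ret variants below additionally store the value loaded by the cmpxchg
+; loop (the old memory contents) to %out2 after the loop exits, so the checks
+; keep the loop result registers (v[4:5], or v[0:1] on GFX9) live through
+; %atomicrmw.end.
+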
+define amdgpu_kernel void @atomic_max_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+; CI-LABEL: atomic_max_i64_ret_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
+; CI-NEXT: s_mov_b64 s[10:11], 0
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s12
+; CI-NEXT: v_mov_b32_e32 v3, s13
+; CI-NEXT: .LBB1_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s9
+; CI-NEXT: v_mov_b32_e32 v4, s8
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; CI-NEXT: s_cbranch_execnz .LBB1_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[10:11]
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_mov_b32 s0, s6
+; CI-NEXT: s_mov_b32 s1, s7
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i64_ret_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[10:11], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x20
+; VI-NEXT: s_add_u32 s0, s4, 32
+; VI-NEXT: s_addc_u32 s1, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB1_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s9
+; VI-NEXT: v_mov_b32_e32 v4, s8
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; VI-NEXT: s_cbranch_execnz .LBB1_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[10:11]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_ret_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB1_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB1_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+; CI-LABEL: atomic_max_i64_addr64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; CI-NEXT: s_add_u32 s4, s0, s4
+; CI-NEXT: s_addc_u32 s5, s1, s5
+; CI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB2_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s3
+; CI-NEXT: v_mov_b32_e32 v4, s2
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB2_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i64_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; VI-NEXT: s_add_u32 s4, s0, s4
+; VI-NEXT: s_addc_u32 s5, s1, s5
+; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20
+; VI-NEXT: s_add_u32 s4, s4, 32
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s5, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB2_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB2_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 3
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x20
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB2_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s7
+; GFX9-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB2_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+; CI-LABEL: atomic_max_i64_ret_addr64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s11, 0xf000
+; CI-NEXT: s_mov_b32 s10, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; CI-NEXT: s_add_u32 s8, s0, s6
+; CI-NEXT: s_addc_u32 s9, s1, s7
+; CI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s6
+; CI-NEXT: v_mov_b32_e32 v3, s7
+; CI-NEXT: .LBB3_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s5
+; CI-NEXT: v_mov_b32_e32 v4, s4
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB3_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[0:1]
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_mov_b32 s4, s2
+; CI-NEXT: s_mov_b32 s5, s3
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i64_ret_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s11, 0xf000
+; VI-NEXT: s_mov_b32 s10, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT: s_add_u32 s6, s0, s6
+; VI-NEXT: s_addc_u32 s7, s1, s7
+; VI-NEXT: s_load_dwordx2 s[12:13], s[6:7], 0x20
+; VI-NEXT: s_add_u32 s8, s6, 32
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s9, s7, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB3_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s5
+; VI-NEXT: v_mov_b32_e32 v4, s4
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB3_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[0:1]
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s4, s2
+; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_ret_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT: s_add_u32 s0, s0, s6
+; GFX9-NEXT: s_addc_u32 s1, s1, s7
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x20
+; GFX9-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB3_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_execnz .LBB3_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64(i64 addrspace(1)* %out, i64 %in) {
+; CI-LABEL: atomic_max_i64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b64 s[4:5], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; CI-NEXT: s_mov_b32 s6, s2
+; CI-NEXT: s_mov_b32 s7, s3
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB4_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s7
+; CI-NEXT: v_mov_b32_e32 v4, s6
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; CI-NEXT: s_cbranch_execnz .LBB4_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; VI-NEXT: s_mov_b32 s6, s2
+; VI-NEXT: s_mov_b32 s7, s3
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB4_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s7
+; VI-NEXT: v_mov_b32_e32 v4, s6
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB4_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, s7
+; GFX9-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v5, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB4_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+; CI-LABEL: atomic_max_i64_ret:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
+; CI-NEXT: s_mov_b64 s[10:11], 0
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s12
+; CI-NEXT: v_mov_b32_e32 v3, s13
+; CI-NEXT: .LBB5_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s9
+; CI-NEXT: v_mov_b32_e32 v4, s8
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; CI-NEXT: s_cbranch_execnz .LBB5_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[10:11]
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_mov_b32 s0, s6
+; CI-NEXT: s_mov_b32 s1, s7
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i64_ret:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[10:11], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB5_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s9
+; VI-NEXT: v_mov_b32_e32 v4, s8
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; VI-NEXT: s_cbranch_execnz .LBB5_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[10:11]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_ret:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB5_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+; CI-LABEL: atomic_max_i64_addr64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; CI-NEXT: s_add_u32 s4, s0, s4
+; CI-NEXT: s_addc_u32 s5, s1, s5
+; CI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB6_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s3
+; CI-NEXT: v_mov_b32_e32 v4, s2
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB6_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i64_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; VI-NEXT: s_add_u32 s4, s0, s4
+; VI-NEXT: s_addc_u32 s5, s1, s5
+; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB6_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB6_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 3
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s7
+; GFX9-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB6_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+; CI-LABEL: atomic_max_i64_ret_addr64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s11, 0xf000
+; CI-NEXT: s_mov_b32 s10, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; CI-NEXT: s_add_u32 s8, s0, s6
+; CI-NEXT: s_addc_u32 s9, s1, s7
+; CI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s6
+; CI-NEXT: v_mov_b32_e32 v3, s7
+; CI-NEXT: .LBB7_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s5
+; CI-NEXT: v_mov_b32_e32 v4, s4
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB7_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[0:1]
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_mov_b32 s4, s2
+; CI-NEXT: s_mov_b32 s5, s3
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i64_ret_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s11, 0xf000
+; VI-NEXT: s_mov_b32 s10, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT: s_add_u32 s8, s0, s6
+; VI-NEXT: s_addc_u32 s9, s1, s7
+; VI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s6
+; VI-NEXT: v_mov_b32_e32 v3, s7
+; VI-NEXT: .LBB7_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s5
+; VI-NEXT: v_mov_b32_e32 v4, s4
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB7_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[0:1]
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s4, s2
+; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_ret_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT: s_add_u32 s0, s0, s6
+; GFX9-NEXT: s_addc_u32 s1, s1, s7
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
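+; The umax tests use the same cmpxchg-loop expansion as max, but with an
+; unsigned comparison: v_cmp_lt_u64 instead of v_cmp_lt_i64 selects the
+; larger operand.
+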
+define amdgpu_kernel void @atomic_umax_i64_offset(i64 addrspace(1)* %out, i64 %in) {
+; CI-LABEL: atomic_umax_i64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b64 s[4:5], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x8
+; CI-NEXT: s_mov_b32 s6, s2
+; CI-NEXT: s_mov_b32 s7, s3
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB8_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s7
+; CI-NEXT: v_mov_b32_e32 v4, s6
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; CI-NEXT: s_cbranch_execnz .LBB8_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[10:11], s[0:1], 0x20
+; VI-NEXT: s_add_u32 s4, s0, 32
+; VI-NEXT: s_addc_u32 s5, s1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s10
+; VI-NEXT: v_mov_b32_e32 v3, s11
+; VI-NEXT: .LBB8_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; VI-NEXT: s_cbranch_execnz .LBB8_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x20
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, s7
+; GFX9-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v5, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB8_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+; CI-LABEL: atomic_umax_i64_ret_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
+; CI-NEXT: s_mov_b64 s[10:11], 0
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s12
+; CI-NEXT: v_mov_b32_e32 v3, s13
+; CI-NEXT: .LBB9_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s9
+; CI-NEXT: v_mov_b32_e32 v4, s8
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; CI-NEXT: s_cbranch_execnz .LBB9_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[10:11]
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_mov_b32 s0, s6
+; CI-NEXT: s_mov_b32 s1, s7
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i64_ret_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[10:11], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x20
+; VI-NEXT: s_add_u32 s0, s4, 32
+; VI-NEXT: s_addc_u32 s1, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB9_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s9
+; VI-NEXT: v_mov_b32_e32 v4, s8
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; VI-NEXT: s_cbranch_execnz .LBB9_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[10:11]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64_ret_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
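+; 64-bit umax on a register-indexed pointer plus a constant offset; the
+; returned old value is unused.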
+define amdgpu_kernel void @atomic_umax_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+; CI-LABEL: atomic_umax_i64_addr64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; CI-NEXT: s_add_u32 s4, s0, s4
+; CI-NEXT: s_addc_u32 s5, s1, s5
+; CI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB10_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s3
+; CI-NEXT: v_mov_b32_e32 v4, s2
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB10_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i64_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; VI-NEXT: s_add_u32 s4, s0, s4
+; VI-NEXT: s_addc_u32 s5, s1, s5
+; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20
+; VI-NEXT: s_add_u32 s4, s4, 32
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s5, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB10_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB10_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 3
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x20
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s7
+; GFX9-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+ ret void
+}
+
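+; Same register-indexed, constant-offset form of umax, but the returned old
+; value is stored to %out2.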
+define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+; CI-LABEL: atomic_umax_i64_ret_addr64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s11, 0xf000
+; CI-NEXT: s_mov_b32 s10, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; CI-NEXT: s_add_u32 s8, s0, s6
+; CI-NEXT: s_addc_u32 s9, s1, s7
+; CI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s6
+; CI-NEXT: v_mov_b32_e32 v3, s7
+; CI-NEXT: .LBB11_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s5
+; CI-NEXT: v_mov_b32_e32 v4, s4
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB11_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[0:1]
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_mov_b32 s4, s2
+; CI-NEXT: s_mov_b32 s5, s3
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i64_ret_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s11, 0xf000
+; VI-NEXT: s_mov_b32 s10, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT: s_add_u32 s6, s0, s6
+; VI-NEXT: s_addc_u32 s7, s1, s7
+; VI-NEXT: s_load_dwordx2 s[12:13], s[6:7], 0x20
+; VI-NEXT: s_add_u32 s8, s6, 32
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s9, s7, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB11_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s5
+; VI-NEXT: v_mov_b32_e32 v4, s4
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB11_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[0:1]
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s4, s2
+; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64_ret_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT: s_add_u32 s0, s0, s6
+; GFX9-NEXT: s_addc_u32 s1, s1, s7
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x20
+; GFX9-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
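+; Plain 64-bit umax on %out with no offset; the returned old value is unused.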
+define amdgpu_kernel void @atomic_umax_i64(i64 addrspace(1)* %out, i64 %in) {
+; CI-LABEL: atomic_umax_i64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b64 s[4:5], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; CI-NEXT: s_mov_b32 s6, s2
+; CI-NEXT: s_mov_b32 s7, s3
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB12_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s7
+; CI-NEXT: v_mov_b32_e32 v4, s6
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; CI-NEXT: s_cbranch_execnz .LBB12_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; VI-NEXT: s_mov_b32 s6, s2
+; VI-NEXT: s_mov_b32 s7, s3
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB12_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s7
+; VI-NEXT: v_mov_b32_e32 v4, s6
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB12_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, s7
+; GFX9-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v5, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
+ ret void
+}
+
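+; Plain 64-bit umax whose returned old value is stored to %out2.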
+define amdgpu_kernel void @atomic_umax_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+; CI-LABEL: atomic_umax_i64_ret:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
+; CI-NEXT: s_mov_b64 s[10:11], 0
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s12
+; CI-NEXT: v_mov_b32_e32 v3, s13
+; CI-NEXT: .LBB13_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s9
+; CI-NEXT: v_mov_b32_e32 v4, s8
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; CI-NEXT: s_cbranch_execnz .LBB13_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[10:11]
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_mov_b32 s0, s6
+; CI-NEXT: s_mov_b32 s1, s7
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i64_ret:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[10:11], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB13_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s9
+; VI-NEXT: v_mov_b32_e32 v4, s8
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; VI-NEXT: s_cbranch_execnz .LBB13_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[10:11]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64_ret:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB13_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
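+; 64-bit umax on a register-indexed pointer with no constant offset.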
+define amdgpu_kernel void @atomic_umax_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+; CI-LABEL: atomic_umax_i64_addr64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; CI-NEXT: s_add_u32 s4, s0, s4
+; CI-NEXT: s_addc_u32 s5, s1, s5
+; CI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB14_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s3
+; CI-NEXT: v_mov_b32_e32 v4, s2
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB14_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i64_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; VI-NEXT: s_add_u32 s4, s0, s4
+; VI-NEXT: s_addc_u32 s5, s1, s5
+; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB14_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB14_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 3
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s7
+; GFX9-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
+ ret void
+}
+
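+; Register-indexed 64-bit umax; the returned old value is stored to %out2.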
+define amdgpu_kernel void @atomic_umax_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+; CI-LABEL: atomic_umax_i64_ret_addr64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s11, 0xf000
+; CI-NEXT: s_mov_b32 s10, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; CI-NEXT: s_add_u32 s8, s0, s6
+; CI-NEXT: s_addc_u32 s9, s1, s7
+; CI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s6
+; CI-NEXT: v_mov_b32_e32 v3, s7
+; CI-NEXT: .LBB15_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s5
+; CI-NEXT: v_mov_b32_e32 v4, s4
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB15_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[0:1]
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_mov_b32 s4, s2
+; CI-NEXT: s_mov_b32 s5, s3
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i64_ret_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s11, 0xf000
+; VI-NEXT: s_mov_b32 s10, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT: s_add_u32 s8, s0, s6
+; VI-NEXT: s_addc_u32 s9, s1, s7
+; VI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s6
+; VI-NEXT: v_mov_b32_e32 v3, s7
+; VI-NEXT: .LBB15_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s5
+; VI-NEXT: v_mov_b32_e32 v4, s4
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB15_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[0:1]
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s4, s2
+; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64_ret_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT: s_add_u32 s0, s0, s6
+; GFX9-NEXT: s_addc_u32 s1, s1, s7
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
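+; Signed 64-bit min at a constant offset from %out; the cmpxchg loop uses the
+; signed v_cmp_ge_i64 compare, in contrast to v_cmp_lt_u64 for umax above.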
+define amdgpu_kernel void @atomic_min_i64_offset(i64 addrspace(1)* %out, i64 %in) {
+; CI-LABEL: atomic_min_i64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b64 s[4:5], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x8
+; CI-NEXT: s_mov_b32 s6, s2
+; CI-NEXT: s_mov_b32 s7, s3
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB16_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s7
+; CI-NEXT: v_mov_b32_e32 v4, s6
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; CI-NEXT: s_cbranch_execnz .LBB16_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_min_i64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[10:11], s[0:1], 0x20
+; VI-NEXT: s_add_u32 s4, s0, 32
+; VI-NEXT: s_addc_u32 s5, s1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s10
+; VI-NEXT: v_mov_b32_e32 v3, s11
+; VI-NEXT: .LBB16_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; VI-NEXT: s_cbranch_execnz .LBB16_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_min_i64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x20
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, s7
+; GFX9-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v5, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB16_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+ %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
+ ret void
+}
+
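+; Signed 64-bit min at a constant offset; the returned old value is stored to
+; %out2.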
+define amdgpu_kernel void @atomic_min_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+; CI-LABEL: atomic_min_i64_ret_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
+; CI-NEXT: s_mov_b64 s[10:11], 0
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s12
+; CI-NEXT: v_mov_b32_e32 v3, s13
+; CI-NEXT: .LBB17_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_i64_e32 vcc, s[8:9], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s9
+; CI-NEXT: v_mov_b32_e32 v4, s8
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; CI-NEXT: s_cbranch_execnz .LBB17_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[10:11]
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_mov_b32 s0, s6
+; CI-NEXT: s_mov_b32 s1, s7
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_min_i64_ret_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[10:11], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x20
+; VI-NEXT: s_add_u32 s0, s4, 32
+; VI-NEXT: s_addc_u32 s1, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB17_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[8:9], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s9
+; VI-NEXT: v_mov_b32_e32 v4, s8
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; VI-NEXT: s_cbranch_execnz .LBB17_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[10:11]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_min_i64_ret_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB17_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+ %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
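+; Signed 64-bit min on a register-indexed pointer plus a constant offset.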
+define amdgpu_kernel void @atomic_min_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+; CI-LABEL: atomic_min_i64_addr64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; CI-NEXT: s_add_u32 s4, s0, s4
+; CI-NEXT: s_addc_u32 s5, s1, s5
+; CI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB18_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s3
+; CI-NEXT: v_mov_b32_e32 v4, s2
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB18_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_min_i64_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; VI-NEXT: s_add_u32 s4, s0, s4
+; VI-NEXT: s_addc_u32 s5, s1, s5
+; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20
+; VI-NEXT: s_add_u32 s4, s4, 32
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s5, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB18_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB18_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_min_i64_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 3
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x20
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s7
+; GFX9-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB18_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+ %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
+ ret void
+}
+
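+; Register-indexed, constant-offset form of signed min; the returned old value
+; is stored to %out2.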
+define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+; CI-LABEL: atomic_min_i64_ret_addr64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s11, 0xf000
+; CI-NEXT: s_mov_b32 s10, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; CI-NEXT: s_add_u32 s8, s0, s6
+; CI-NEXT: s_addc_u32 s9, s1, s7
+; CI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s6
+; CI-NEXT: v_mov_b32_e32 v3, s7
+; CI-NEXT: .LBB19_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s5
+; CI-NEXT: v_mov_b32_e32 v4, s4
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB19_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[0:1]
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_mov_b32 s4, s2
+; CI-NEXT: s_mov_b32 s5, s3
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_min_i64_ret_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s11, 0xf000
+; VI-NEXT: s_mov_b32 s10, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT: s_add_u32 s6, s0, s6
+; VI-NEXT: s_addc_u32 s7, s1, s7
+; VI-NEXT: s_load_dwordx2 s[12:13], s[6:7], 0x20
+; VI-NEXT: s_add_u32 s8, s6, 32
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s9, s7, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB19_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s5
+; VI-NEXT: v_mov_b32_e32 v4, s4
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB19_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[0:1]
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s4, s2
+; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_min_i64_ret_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT: s_add_u32 s0, s0, s6
+; GFX9-NEXT: s_addc_u32 s1, s1, s7
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x20
+; GFX9-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_execnz .LBB19_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+ %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
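+; Plain signed 64-bit min on %out; the returned old value is unused.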
+define amdgpu_kernel void @atomic_min_i64(i64 addrspace(1)* %out, i64 %in) {
+; CI-LABEL: atomic_min_i64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b64 s[4:5], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; CI-NEXT: s_mov_b32 s6, s2
+; CI-NEXT: s_mov_b32 s7, s3
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB20_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s7
+; CI-NEXT: v_mov_b32_e32 v4, s6
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; CI-NEXT: s_cbranch_execnz .LBB20_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_min_i64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; VI-NEXT: s_mov_b32 s6, s2
+; VI-NEXT: s_mov_b32 s7, s3
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB20_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s7
+; VI-NEXT: v_mov_b32_e32 v4, s6
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB20_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_min_i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, s7
+; GFX9-NEXT: .LBB20_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v5, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB20_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
+ ret void
+}
+
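+; Plain signed 64-bit min whose returned old value is stored to %out2.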
+define amdgpu_kernel void @atomic_min_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+; CI-LABEL: atomic_min_i64_ret:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
+; CI-NEXT: s_mov_b64 s[10:11], 0
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s12
+; CI-NEXT: v_mov_b32_e32 v3, s13
+; CI-NEXT: .LBB21_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_i64_e32 vcc, s[8:9], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s9
+; CI-NEXT: v_mov_b32_e32 v4, s8
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; CI-NEXT: s_cbranch_execnz .LBB21_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[10:11]
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_mov_b32 s0, s6
+; CI-NEXT: s_mov_b32 s1, s7
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_min_i64_ret:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[10:11], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB21_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[8:9], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s9
+; VI-NEXT: v_mov_b32_e32 v4, s8
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; VI-NEXT: s_cbranch_execnz .LBB21_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[10:11]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_min_i64_ret:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB21_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB21_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
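+; Signed 64-bit min on a register-indexed pointer with no constant offset.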
+define amdgpu_kernel void @atomic_min_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+; CI-LABEL: atomic_min_i64_addr64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; CI-NEXT: s_add_u32 s4, s0, s4
+; CI-NEXT: s_addc_u32 s5, s1, s5
+; CI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB22_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s3
+; CI-NEXT: v_mov_b32_e32 v4, s2
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB22_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_min_i64_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; VI-NEXT: s_add_u32 s4, s0, s4
+; VI-NEXT: s_addc_u32 s5, s1, s5
+; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB22_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB22_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_min_i64_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 3
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB22_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s7
+; GFX9-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB22_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
+ ret void
+}
+
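+; The _ret variants run the same loop but keep the old value returned by the
+; final cmpswap and store it to %out2 after the loop exits.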
+define amdgpu_kernel void @atomic_min_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+; CI-LABEL: atomic_min_i64_ret_addr64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s11, 0xf000
+; CI-NEXT: s_mov_b32 s10, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; CI-NEXT: s_add_u32 s8, s0, s6
+; CI-NEXT: s_addc_u32 s9, s1, s7
+; CI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s6
+; CI-NEXT: v_mov_b32_e32 v3, s7
+; CI-NEXT: .LBB23_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s5
+; CI-NEXT: v_mov_b32_e32 v4, s4
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB23_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[0:1]
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_mov_b32 s4, s2
+; CI-NEXT: s_mov_b32 s5, s3
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_min_i64_ret_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s11, 0xf000
+; VI-NEXT: s_mov_b32 s10, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT: s_add_u32 s8, s0, s6
+; VI-NEXT: s_addc_u32 s9, s1, s7
+; VI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s6
+; VI-NEXT: v_mov_b32_e32 v3, s7
+; VI-NEXT: .LBB23_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s5
+; VI-NEXT: v_mov_b32_e32 v4, s4
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB23_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[0:1]
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s4, s2
+; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_min_i64_ret_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT: s_add_u32 s0, s0, s6
+; GFX9-NEXT: s_addc_u32 s1, s1, s7
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB23_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_execnz .LBB23_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
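+; The umin tests mirror the signed min tests above; only the comparison changes
+; (v_cmp_ge_u64 instead of v_cmp_ge_i64), and the cmpxchg expansion is the same.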
+define amdgpu_kernel void @atomic_umin_i64_offset(i64 addrspace(1)* %out, i64 %in) {
+; CI-LABEL: atomic_umin_i64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b64 s[4:5], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x8
+; CI-NEXT: s_mov_b32 s6, s2
+; CI-NEXT: s_mov_b32 s7, s3
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB24_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s7
+; CI-NEXT: v_mov_b32_e32 v4, s6
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; CI-NEXT: s_cbranch_execnz .LBB24_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[10:11], s[0:1], 0x20
+; VI-NEXT: s_add_u32 s4, s0, 32
+; VI-NEXT: s_addc_u32 s5, s1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s10
+; VI-NEXT: v_mov_b32_e32 v3, s11
+; VI-NEXT: .LBB24_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; VI-NEXT: s_cbranch_execnz .LBB24_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x20
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, s7
+; GFX9-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v5, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB24_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+; CI-LABEL: atomic_umin_i64_ret_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
+; CI-NEXT: s_mov_b64 s[10:11], 0
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s12
+; CI-NEXT: v_mov_b32_e32 v3, s13
+; CI-NEXT: .LBB25_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_u64_e32 vcc, s[8:9], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s9
+; CI-NEXT: v_mov_b32_e32 v4, s8
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; CI-NEXT: s_cbranch_execnz .LBB25_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[10:11]
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_mov_b32 s0, s6
+; CI-NEXT: s_mov_b32 s1, s7
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i64_ret_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[10:11], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x20
+; VI-NEXT: s_add_u32 s0, s4, 32
+; VI-NEXT: s_addc_u32 s1, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB25_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[8:9], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s9
+; VI-NEXT: v_mov_b32_e32 v4, s8
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; VI-NEXT: s_cbranch_execnz .LBB25_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[10:11]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i64_ret_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB25_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
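+; With both an index and a constant offset, CI and GFX9 fold the 32-byte offset
+; into the memory instruction (offset:32), while VI adds it to the scalar address.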
+define amdgpu_kernel void @atomic_umin_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+; CI-LABEL: atomic_umin_i64_addr64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; CI-NEXT: s_add_u32 s4, s0, s4
+; CI-NEXT: s_addc_u32 s5, s1, s5
+; CI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB26_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s3
+; CI-NEXT: v_mov_b32_e32 v4, s2
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB26_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i64_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; VI-NEXT: s_add_u32 s4, s0, s4
+; VI-NEXT: s_addc_u32 s5, s1, s5
+; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20
+; VI-NEXT: s_add_u32 s4, s4, 32
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s5, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB26_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB26_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i64_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 3
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x20
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s7
+; GFX9-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB26_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+; CI-LABEL: atomic_umin_i64_ret_addr64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s11, 0xf000
+; CI-NEXT: s_mov_b32 s10, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; CI-NEXT: s_add_u32 s8, s0, s6
+; CI-NEXT: s_addc_u32 s9, s1, s7
+; CI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s6
+; CI-NEXT: v_mov_b32_e32 v3, s7
+; CI-NEXT: .LBB27_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s5
+; CI-NEXT: v_mov_b32_e32 v4, s4
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB27_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[0:1]
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_mov_b32 s4, s2
+; CI-NEXT: s_mov_b32 s5, s3
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i64_ret_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s11, 0xf000
+; VI-NEXT: s_mov_b32 s10, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT: s_add_u32 s6, s0, s6
+; VI-NEXT: s_addc_u32 s7, s1, s7
+; VI-NEXT: s_load_dwordx2 s[12:13], s[6:7], 0x20
+; VI-NEXT: s_add_u32 s8, s6, 32
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s9, s7, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB27_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s5
+; VI-NEXT: v_mov_b32_e32 v4, s4
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB27_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[0:1]
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s4, s2
+; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i64_ret_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT: s_add_u32 s0, s0, s6
+; GFX9-NEXT: s_addc_u32 s1, s1, s7
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x20
+; GFX9-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_execnz .LBB27_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64(i64 addrspace(1)* %out, i64 %in) {
+; CI-LABEL: atomic_umin_i64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b64 s[4:5], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; CI-NEXT: s_mov_b32 s6, s2
+; CI-NEXT: s_mov_b32 s7, s3
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB28_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s7
+; CI-NEXT: v_mov_b32_e32 v4, s6
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; CI-NEXT: s_cbranch_execnz .LBB28_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; VI-NEXT: s_mov_b32 s6, s2
+; VI-NEXT: s_mov_b32 s7, s3
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB28_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s7
+; VI-NEXT: v_mov_b32_e32 v4, s6
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB28_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, s7
+; GFX9-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v5, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB28_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+; CI-LABEL: atomic_umin_i64_ret:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
+; CI-NEXT: s_mov_b64 s[10:11], 0
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s12
+; CI-NEXT: v_mov_b32_e32 v3, s13
+; CI-NEXT: .LBB29_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_u64_e32 vcc, s[8:9], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s9
+; CI-NEXT: v_mov_b32_e32 v4, s8
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; CI-NEXT: s_cbranch_execnz .LBB29_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[10:11]
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_mov_b32 s0, s6
+; CI-NEXT: s_mov_b32 s1, s7
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i64_ret:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[10:11], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB29_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[8:9], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s9
+; VI-NEXT: v_mov_b32_e32 v4, s8
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; VI-NEXT: s_cbranch_execnz .LBB29_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[10:11]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i64_ret:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB29_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+; CI-LABEL: atomic_umin_i64_addr64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; CI-NEXT: s_add_u32 s4, s0, s4
+; CI-NEXT: s_addc_u32 s5, s1, s5
+; CI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB30_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s3
+; CI-NEXT: v_mov_b32_e32 v4, s2
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB30_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i64_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; VI-NEXT: s_add_u32 s4, s0, s4
+; VI-NEXT: s_addc_u32 s5, s1, s5
+; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB30_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB30_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i64_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 3
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s7
+; GFX9-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB30_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+; CI-LABEL: atomic_umin_i64_ret_addr64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s11, 0xf000
+; CI-NEXT: s_mov_b32 s10, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; CI-NEXT: s_add_u32 s8, s0, s6
+; CI-NEXT: s_addc_u32 s9, s1, s7
+; CI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s6
+; CI-NEXT: v_mov_b32_e32 v3, s7
+; CI-NEXT: .LBB31_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s5
+; CI-NEXT: v_mov_b32_e32 v4, s4
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB31_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[0:1]
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_mov_b32 s4, s2
+; CI-NEXT: s_mov_b32 s5, s3
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i64_ret_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s11, 0xf000
+; VI-NEXT: s_mov_b32 s10, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT: s_add_u32 s8, s0, s6
+; VI-NEXT: s_addc_u32 s9, s1, s7
+; VI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s6
+; VI-NEXT: v_mov_b32_e32 v3, s7
+; VI-NEXT: .LBB31_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s5
+; VI-NEXT: v_mov_b32_e32 v4, s4
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB31_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[0:1]
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s4, s2
+; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i64_ret_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT: s_add_u32 s0, s0, s6
+; GFX9-NEXT: s_addc_u32 s1, s1, s7
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_execnz .LBB31_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -amdgpu-atomic-optimizations=false -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-atomic-optimizations=false -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-atomic-optimizations=false -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s
+
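+; These tests check that seq_cst max atomicrmw on global i32 pointers is
+; expanded into a compare-and-swap loop (v_max_i32 feeding *_atomic_cmpswap)
+; rather than emitted as a single hardware atomic max.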
+define amdgpu_kernel void @atomic_max_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+; SI-LABEL: atomic_max_i32_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s2, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s3, s[4:5], 0x4
+; SI-NEXT: s_mov_b64 s[0:1], 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: .LBB0_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s2, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT: s_cbranch_execnz .LBB0_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i32_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s2, s[8:9], 0x10
+; VI-NEXT: s_add_u32 s0, s8, 16
+; VI-NEXT: s_addc_u32 s1, s9, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB0_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB0_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s5, s[2:3], 0x10
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB0_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_i32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB0_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+; SI-LABEL: atomic_max_i32_ret_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s10, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s2, s[4:5], 0x4
+; SI-NEXT: s_mov_b64 s[8:9], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB1_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s10, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT: s_cbranch_execnz .LBB1_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i32_ret_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s10, s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s2, s[4:5], 0x10
+; VI-NEXT: s_add_u32 s0, s4, 16
+; VI-NEXT: s_addc_u32 s1, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB1_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s10, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; VI-NEXT: s_cbranch_execnz .LBB1_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[8:9]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_ret_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s2, s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s3, s[4:5], 0x10
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: .LBB1_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_max_i32_e32 v2, s2, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB1_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+; SI-LABEL: atomic_max_i32_addr64_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s6, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x4
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB2_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s6, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB2_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i32_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x10
+; VI-NEXT: s_add_u32 s0, s0, 16
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_addc_u32 s1, s1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB2_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB2_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s6, s0
+; GFX9-NEXT: s_addc_u32 s1, s7, s1
+; GFX9-NEXT: s_load_dword s5, s[0:1], 0x10
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB2_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_i32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB2_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
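+; Same addressing as above, but the value returned by the atomic is stored to %out2.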
+define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+; SI-LABEL: atomic_max_i32_ret_addr64_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s8, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x4
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB3_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s8, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB3_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[4:5]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i32_ret_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s8, s[0:1], 0x34
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x10
+; VI-NEXT: s_add_u32 s0, s0, 16
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_addc_u32 s1, s1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB3_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s8, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB3_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[4:5]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_ret_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s8, s[0:1], 0x34
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x10
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: .LBB3_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_max_i32_e32 v2, s8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB3_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
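+; Volatile seq_cst 'max' directly on %out; expanded to a cmpxchg loop, result unused.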
+define amdgpu_kernel void @atomic_max_i32(i32 addrspace(1)* %out, i32 %in) {
+; SI-LABEL: atomic_max_i32:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s2, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s3, s[4:5], 0x0
+; SI-NEXT: s_mov_b64 s[0:1], 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: .LBB4_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s2, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT: s_cbranch_execnz .LBB4_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i32:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s3, s[4:5], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: .LBB4_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s2, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB4_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s5, s[2:3], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_i32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB4_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
+ ret void
+}
+
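+; Volatile seq_cst 'max' directly on %out; the returned value is stored to %out2.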
+define amdgpu_kernel void @atomic_max_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+; SI-LABEL: atomic_max_i32_ret:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s10, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s2, s[4:5], 0x0
+; SI-NEXT: s_mov_b64 s[8:9], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB5_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s10, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT: s_cbranch_execnz .LBB5_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i32_ret:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s10, s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s2, s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB5_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s10, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; VI-NEXT: s_cbranch_execnz .LBB5_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[8:9]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_ret:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s2, s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s3, s[4:5], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_max_i32_e32 v2, s2, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB5_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
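+; Volatile seq_cst 'max' on a global pointer indexed by %index; expanded to a
+; cmpxchg loop, result unused.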
+define amdgpu_kernel void @atomic_max_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+; SI-LABEL: atomic_max_i32_addr64:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s6, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x0
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB6_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s6, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB6_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i32_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x0
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB6_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB6_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s6, s0
+; GFX9-NEXT: s_addc_u32 s1, s7, s1
+; GFX9-NEXT: s_load_dword s5, s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_i32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB6_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
+ ret void
+}
+
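+; Volatile seq_cst 'max' on a global pointer indexed by %index; the returned
+; value is stored to %out2.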
+define amdgpu_kernel void @atomic_max_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+; SI-LABEL: atomic_max_i32_ret_addr64:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s8, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x0
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB7_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s8, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB7_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[4:5]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i32_ret_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s8, s[0:1], 0x34
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x0
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB7_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s8, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB7_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[4:5]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_ret_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s8, s[0:1], 0x34
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_max_i32_e32 v2, s8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
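+; Unsigned variant: volatile seq_cst 'umax' at a constant offset from %out;
+; expanded to a cmpxchg loop, result unused.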
+define amdgpu_kernel void @atomic_umax_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+; SI-LABEL: atomic_umax_i32_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s2, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s3, s[4:5], 0x4
+; SI-NEXT: s_mov_b64 s[0:1], 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: .LBB8_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_u32_e32 v0, s2, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT: s_cbranch_execnz .LBB8_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i32_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s2, s[8:9], 0x10
+; VI-NEXT: s_add_u32 s0, s8, 16
+; VI-NEXT: s_addc_u32 s1, s9, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB8_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_u32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB8_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i32_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s5, s[2:3], 0x10
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_u32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB8_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
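+; Volatile seq_cst 'umax' at a constant offset from %out; the returned value is
+; stored to %out2.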
+define amdgpu_kernel void @atomic_umax_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+; SI-LABEL: atomic_umax_i32_ret_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s10, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s2, s[4:5], 0x4
+; SI-NEXT: s_mov_b64 s[8:9], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB9_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_u32_e32 v0, s10, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT: s_cbranch_execnz .LBB9_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i32_ret_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s10, s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s2, s[4:5], 0x10
+; VI-NEXT: s_add_u32 s0, s4, 16
+; VI-NEXT: s_addc_u32 s1, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB9_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_u32_e32 v0, s10, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; VI-NEXT: s_cbranch_execnz .LBB9_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[8:9]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i32_ret_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s2, s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s3, s[4:5], 0x10
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_max_u32_e32 v2, s2, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
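+; Volatile seq_cst 'umax' on a global pointer indexed by %index plus a constant
+; element offset; expanded to a cmpxchg loop, result unused.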
+define amdgpu_kernel void @atomic_umax_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+; SI-LABEL: atomic_umax_i32_addr64_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s6, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x4
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB10_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_u32_e32 v0, s6, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB10_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i32_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x10
+; VI-NEXT: s_add_u32 s0, s0, 16
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_addc_u32 s1, s1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB10_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_u32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB10_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i32_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s6, s0
+; GFX9-NEXT: s_addc_u32 s1, s7, s1
+; GFX9-NEXT: s_load_dword s5, s[0:1], 0x10
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_u32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
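+; Volatile seq_cst 'umax' on a global pointer indexed by %index plus a constant
+; element offset; the returned value is stored to %out2.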
+define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+; SI-LABEL: atomic_umax_i32_ret_addr64_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s8, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x4
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB11_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_u32_e32 v0, s8, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB11_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[4:5]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i32_ret_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s8, s[0:1], 0x34
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x10
+; VI-NEXT: s_add_u32 s0, s0, 16
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_addc_u32 s1, s1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB11_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_u32_e32 v0, s8, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB11_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[4:5]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i32_ret_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s8, s[0:1], 0x34
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x10
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_max_u32_e32 v2, s8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
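+; Volatile seq_cst 'umax' directly on %out; expanded to a cmpxchg loop, result unused.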
+define amdgpu_kernel void @atomic_umax_i32(i32 addrspace(1)* %out, i32 %in) {
+; SI-LABEL: atomic_umax_i32:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s2, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s3, s[4:5], 0x0
+; SI-NEXT: s_mov_b64 s[0:1], 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: .LBB12_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_u32_e32 v0, s2, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT: s_cbranch_execnz .LBB12_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i32:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s3, s[4:5], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: .LBB12_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_u32_e32 v0, s2, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB12_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s5, s[2:3], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_u32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
+ ret void
+}
+
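+; Volatile seq_cst 'umax' directly on %out; the returned value is stored to %out2.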
+define amdgpu_kernel void @atomic_umax_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+; SI-LABEL: atomic_umax_i32_ret:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s10, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s2, s[4:5], 0x0
+; SI-NEXT: s_mov_b64 s[8:9], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB13_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_u32_e32 v0, s10, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT: s_cbranch_execnz .LBB13_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i32_ret:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s10, s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s2, s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB13_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_u32_e32 v0, s10, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; VI-NEXT: s_cbranch_execnz .LBB13_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[8:9]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i32_ret:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s2, s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s3, s[4:5], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_max_u32_e32 v2, s2, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB13_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
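+; Volatile seq_cst 'umax' on a global pointer indexed by %index; expanded to a
+; cmpxchg loop, result unused.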
+define amdgpu_kernel void @atomic_umax_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+; SI-LABEL: atomic_umax_i32_addr64:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s6, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x0
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB14_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_u32_e32 v0, s6, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB14_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i32_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x0
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB14_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_u32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB14_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i32_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s6, s0
+; GFX9-NEXT: s_addc_u32 s1, s7, s1
+; GFX9-NEXT: s_load_dword s5, s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_u32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
+ ret void
+}
+
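+; Volatile seq_cst 'umax' on a global pointer indexed by %index; the returned
+; value is stored to %out2.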
+define amdgpu_kernel void @atomic_umax_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+; SI-LABEL: atomic_umax_i32_ret_addr64:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s8, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x0
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB15_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_u32_e32 v0, s8, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB15_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[4:5]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i32_ret_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s8, s[0:1], 0x34
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x0
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB15_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_u32_e32 v0, s8, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB15_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[4:5]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i32_ret_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s8, s[0:1], 0x34
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_max_u32_e32 v2, s8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
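+; Signed variant: volatile seq_cst 'min' at a constant offset from %out;
+; expanded to a cmpxchg loop, result unused.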
+define amdgpu_kernel void @atomic_min_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+; SI-LABEL: atomic_min_i32_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s2, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s3, s[4:5], 0x4
+; SI-NEXT: s_mov_b64 s[0:1], 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: .LBB16_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_min_i32_e32 v0, s2, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT: s_cbranch_execnz .LBB16_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_min_i32_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s2, s[8:9], 0x10
+; VI-NEXT: s_add_u32 s0, s8, 16
+; VI-NEXT: s_addc_u32 s1, s9, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB16_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_min_i32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB16_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_min_i32_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s5, s[2:3], 0x10
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_min_i32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB16_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
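+; Volatile seq_cst 'min' at a constant offset from %out; the returned value is
+; stored to %out2.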
+define amdgpu_kernel void @atomic_min_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+; SI-LABEL: atomic_min_i32_ret_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s10, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s2, s[4:5], 0x4
+; SI-NEXT: s_mov_b64 s[8:9], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB17_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_min_i32_e32 v0, s10, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT: s_cbranch_execnz .LBB17_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_min_i32_ret_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s10, s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s2, s[4:5], 0x10
+; VI-NEXT: s_add_u32 s0, s4, 16
+; VI-NEXT: s_addc_u32 s1, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB17_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_min_i32_e32 v0, s10, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; VI-NEXT: s_cbranch_execnz .LBB17_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[8:9]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_min_i32_ret_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s2, s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s3, s[4:5], 0x10
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_min_i32_e32 v2, s2, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB17_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+; SI-LABEL: atomic_min_i32_addr64_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s6, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x4
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB18_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_min_i32_e32 v0, s6, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB18_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_min_i32_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x10
+; VI-NEXT: s_add_u32 s0, s0, 16
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_addc_u32 s1, s1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB18_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_min_i32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB18_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_min_i32_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s6, s0
+; GFX9-NEXT: s_addc_u32 s1, s7, s1
+; GFX9-NEXT: s_load_dword s5, s[0:1], 0x10
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_min_i32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB18_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+; SI-LABEL: atomic_min_i32_ret_addr64_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s8, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x4
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB19_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_min_i32_e32 v0, s8, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB19_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[4:5]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_min_i32_ret_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s8, s[0:1], 0x34
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x10
+; VI-NEXT: s_add_u32 s0, s0, 16
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_addc_u32 s1, s1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB19_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_min_i32_e32 v0, s8, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB19_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[4:5]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_min_i32_ret_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s8, s[0:1], 0x34
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x10
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_min_i32_e32 v2, s8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB19_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32(i32 addrspace(1)* %out, i32 %in) {
+; SI-LABEL: atomic_min_i32:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s2, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s3, s[4:5], 0x0
+; SI-NEXT: s_mov_b64 s[0:1], 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: .LBB20_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_min_i32_e32 v0, s2, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT: s_cbranch_execnz .LBB20_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_min_i32:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s3, s[4:5], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: .LBB20_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_min_i32_e32 v0, s2, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB20_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_min_i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s5, s[2:3], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB20_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_min_i32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB20_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+; SI-LABEL: atomic_min_i32_ret:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s10, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s2, s[4:5], 0x0
+; SI-NEXT: s_mov_b64 s[8:9], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB21_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_min_i32_e32 v0, s10, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT: s_cbranch_execnz .LBB21_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_min_i32_ret:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s10, s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s2, s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB21_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_min_i32_e32 v0, s10, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; VI-NEXT: s_cbranch_execnz .LBB21_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[8:9]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_min_i32_ret:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s2, s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s3, s[4:5], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: .LBB21_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_min_i32_e32 v2, s2, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB21_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+; SI-LABEL: atomic_min_i32_addr64:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s6, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x0
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB22_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_min_i32_e32 v0, s6, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB22_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_min_i32_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x0
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB22_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_min_i32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB22_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_min_i32_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s6, s0
+; GFX9-NEXT: s_addc_u32 s1, s7, s1
+; GFX9-NEXT: s_load_dword s5, s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB22_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_min_i32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB22_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+; SI-LABEL: atomic_min_i32_ret_addr64:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s8, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x0
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB23_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_min_i32_e32 v0, s8, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB23_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[4:5]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_min_i32_ret_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s8, s[0:1], 0x34
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x0
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB23_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_min_i32_e32 v0, s8, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB23_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[4:5]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_min_i32_ret_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s8, s[0:1], 0x34
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: .LBB23_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_min_i32_e32 v2, s8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB23_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+; SI-LABEL: atomic_umin_i32_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s2, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s3, s[4:5], 0x4
+; SI-NEXT: s_mov_b64 s[0:1], 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: .LBB24_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_min_u32_e32 v0, s2, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT: s_cbranch_execnz .LBB24_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i32_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s2, s[8:9], 0x10
+; VI-NEXT: s_add_u32 s0, s8, 16
+; VI-NEXT: s_addc_u32 s1, s9, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB24_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_min_u32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB24_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i32_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s5, s[2:3], 0x10
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_min_u32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB24_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+; SI-LABEL: atomic_umin_i32_ret_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s10, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s2, s[4:5], 0x4
+; SI-NEXT: s_mov_b64 s[8:9], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB25_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_min_u32_e32 v0, s10, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT: s_cbranch_execnz .LBB25_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i32_ret_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s10, s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s2, s[4:5], 0x10
+; VI-NEXT: s_add_u32 s0, s4, 16
+; VI-NEXT: s_addc_u32 s1, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB25_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_min_u32_e32 v0, s10, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; VI-NEXT: s_cbranch_execnz .LBB25_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[8:9]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i32_ret_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s2, s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s3, s[4:5], 0x10
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_min_u32_e32 v2, s2, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB25_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+; SI-LABEL: atomic_umin_i32_addr64_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s6, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x4
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB26_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_min_u32_e32 v0, s6, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB26_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i32_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x10
+; VI-NEXT: s_add_u32 s0, s0, 16
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_addc_u32 s1, s1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB26_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_min_u32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB26_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i32_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s6, s0
+; GFX9-NEXT: s_addc_u32 s1, s7, s1
+; GFX9-NEXT: s_load_dword s5, s[0:1], 0x10
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_min_u32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB26_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+; SI-LABEL: atomic_umin_i32_ret_addr64_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s8, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x4
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB27_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_min_u32_e32 v0, s8, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB27_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[4:5]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i32_ret_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s8, s[0:1], 0x34
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x10
+; VI-NEXT: s_add_u32 s0, s0, 16
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_addc_u32 s1, s1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB27_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_min_u32_e32 v0, s8, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB27_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[4:5]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i32_ret_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s8, s[0:1], 0x34
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x10
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_min_u32_e32 v2, s8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB27_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i32(i32 addrspace(1)* %out, i32 %in) {
+; SI-LABEL: atomic_umin_i32:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s2, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s3, s[4:5], 0x0
+; SI-NEXT: s_mov_b64 s[0:1], 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: .LBB28_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_min_u32_e32 v0, s2, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT: s_cbranch_execnz .LBB28_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i32:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s3, s[4:5], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: .LBB28_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_min_u32_e32 v0, s2, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB28_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s5, s[2:3], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_min_u32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB28_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+; SI-LABEL: atomic_umin_i32_ret:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s10, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s2, s[4:5], 0x0
+; SI-NEXT: s_mov_b64 s[8:9], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB29_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_min_u32_e32 v0, s10, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT: s_cbranch_execnz .LBB29_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i32_ret:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s10, s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s2, s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB29_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_min_u32_e32 v0, s10, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; VI-NEXT: s_cbranch_execnz .LBB29_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[8:9]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i32_ret:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s2, s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s3, s[4:5], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_min_u32_e32 v2, s2, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB29_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+; SI-LABEL: atomic_umin_i32_addr64:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s6, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x0
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB30_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_min_u32_e32 v0, s6, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB30_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i32_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x0
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB30_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_min_u32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB30_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i32_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s6, s0
+; GFX9-NEXT: s_addc_u32 s1, s7, s1
+; GFX9-NEXT: s_load_dword s5, s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_min_u32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB30_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+; SI-LABEL: atomic_umin_i32_ret_addr64:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s8, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x0
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB31_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_min_u32_e32 v0, s8, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB31_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[4:5]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i32_ret_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s8, s[0:1], 0x34
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x0
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB31_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_min_u32_e32 v0, s8, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB31_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[4:5]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i32_ret_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s8, s[0:1], 0x34
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_min_u32_e32 v2, s8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB31_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; XUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,VI %s
+
+; FIXME: broken on VI because flat instructions need to be emitted
+; instead of the addr64 equivalents of the _OFFSET variants.
+
+; Check that moving the pointer out of the resource descriptor to
+; vaddr works for atomics.
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+define amdgpu_kernel void @atomic_max_i32(ptr addrspace(1) %out, ptr addrspace(1) %in, ptr addrspace(1) %x, i32 %y) #0 {
+; GCN-LABEL: atomic_max_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s10, 0
+; GCN-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GCN-NEXT: v_mov_b32_e32 v2, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_mov_b64 s[8:9], s[6:7]
+; GCN-NEXT: buffer_load_dwordx2 v[1:2], v[1:2], s[8:11], 0 addr64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 1, v0
+; GCN-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GCN-NEXT: s_cbranch_execz .LBB0_4
+; GCN-NEXT: ; %bb.1: ; %atomic
+; GCN-NEXT: s_mov_b32 s8, s10
+; GCN-NEXT: s_mov_b32 s9, s10
+; GCN-NEXT: buffer_load_dword v4, v[1:2], s[8:11], 0 addr64 offset:400
+; GCN-NEXT: s_load_dword s2, s[0:1], 0xf
+; GCN-NEXT: s_mov_b64 s[0:1], 0
+; GCN-NEXT: .LBB0_2: ; %atomicrmw.start
+; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_i32_e32 v3, s2, v4
+; GCN-NEXT: s_waitcnt expcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v6, v4
+; GCN-NEXT: v_mov_b32_e32 v5, v3
+; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN-NEXT: buffer_atomic_cmpswap v[5:6], v[1:2], s[8:11], 0 addr64 offset:400 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_wbinvl1
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
+; GCN-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN-NEXT: v_mov_b32_e32 v4, v5
+; GCN-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN-NEXT: s_cbranch_execnz .LBB0_2
+; GCN-NEXT: ; %bb.3: ; %atomicrmw.end
+; GCN-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_mov_b32 s6, -1
+; GCN-NEXT: buffer_store_dword v5, off, s[4:7], 0
+; GCN-NEXT: .LBB0_4: ; %exit
+; GCN-NEXT: s_endpgm
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.gep = getelementptr ptr addrspace(1), ptr addrspace(1) %in, i32 %tid
+ %ptr = load volatile ptr addrspace(1), ptr addrspace(1) %tid.gep
+ %xor = xor i32 %tid, 1
+ %cmp = icmp ne i32 %xor, 0
+ br i1 %cmp, label %atomic, label %exit
+
+atomic:
+ %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 100
+ %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y seq_cst
+ store i32 %ret, ptr addrspace(1) %out
+ br label %exit
+
+exit:
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_noret(ptr addrspace(1) %out, ptr addrspace(1) %in, ptr addrspace(1) %x, i32 %y) #0 {
+; GCN-LABEL: atomic_max_i32_noret:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xb
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_mov_b32 s6, 0
+; GCN-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; GCN-NEXT: v_mov_b32_e32 v2, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: buffer_load_dwordx2 v[1:2], v[1:2], s[4:7], 0 addr64 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 1, v0
+; GCN-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GCN-NEXT: s_cbranch_execz .LBB1_3
+; GCN-NEXT: ; %bb.1: ; %atomic
+; GCN-NEXT: s_mov_b32 s4, s6
+; GCN-NEXT: s_mov_b32 s5, s6
+; GCN-NEXT: buffer_load_dword v4, v[1:2], s[4:7], 0 addr64 offset:400
+; GCN-NEXT: s_load_dword s2, s[0:1], 0xf
+; GCN-NEXT: s_mov_b64 s[0:1], 0
+; GCN-NEXT: .LBB1_2: ; %atomicrmw.start
+; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_max_i32_e32 v3, s2, v4
+; GCN-NEXT: s_waitcnt expcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v6, v4
+; GCN-NEXT: v_mov_b32_e32 v5, v3
+; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN-NEXT: buffer_atomic_cmpswap v[5:6], v[1:2], s[4:7], 0 addr64 offset:400 glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_wbinvl1
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
+; GCN-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN-NEXT: v_mov_b32_e32 v4, v5
+; GCN-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN-NEXT: s_cbranch_execnz .LBB1_2
+; GCN-NEXT: .LBB1_3: ; %exit
+; GCN-NEXT: s_endpgm
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.gep = getelementptr ptr addrspace(1), ptr addrspace(1) %in, i32 %tid
+ %ptr = load volatile ptr addrspace(1), ptr addrspace(1) %tid.gep
+ %xor = xor i32 %tid, 1
+ %cmp = icmp ne i32 %xor, 0
+ br i1 %cmp, label %atomic, label %exit
+
+atomic:
+ %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 100
+ %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y seq_cst
+ br label %exit
+
+exit:
+ ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
; GCN-NEXT: s_mov_b32 s7, s11
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: buffer_atomic_smax v0, v[1:2], s[8:11], 0 addr64 offset:400 glc
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: buffer_wbinvl1
; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GCN-NEXT: .LBB0_2: ; %exit
; GCN-NEXT: s_endpgm
atomic:
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 100
- %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y seq_cst
+ %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y syncscope("workgroup") seq_cst
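+  ; At workgroup scope the hook does not force a CAS expansion, so the
+  ; native buffer_atomic_smax survives and the buffer_wbinvl1 invalidate
+  ; above is no longer emitted.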
store i32 %ret, ptr addrspace(1) %out
br label %exit
; GCN-NEXT: s_mov_b32 s5, s6
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: buffer_atomic_smax v0, v[1:2], s[4:7], 0 addr64 offset:400
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: buffer_wbinvl1
; GCN-NEXT: .LBB1_2: ; %exit
; GCN-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
atomic:
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 100
- %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y seq_cst
+ %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y syncscope("workgroup") seq_cst
br label %exit
exit: