SGPR104, SGPR105
]>>>,
- // We have no way of referring to the generated register tuples
- // here, so use a custom function.
- CCIfInReg<CCIfType<[i64], CCCustom<"allocateSGPRTuple">>>,
- CCIfByVal<CCIfType<[i64], CCCustom<"allocateSGPRTuple">>>,
-
// 32*4 + 4 is the minimum for a fetch shader consumer with 32 inputs.
CCIfNotInReg<CCIfType<[f32, i32, f16, v2i16, v2f16] , CCAssignToReg<[
VGPR0, VGPR1, VGPR2, VGPR3, VGPR4, VGPR5, VGPR6, VGPR7,
VGPR8, VGPR9, VGPR10, VGPR11, VGPR12, VGPR13, VGPR14, VGPR15,
VGPR16, VGPR17, VGPR18, VGPR19, VGPR20, VGPR21, VGPR22, VGPR23,
VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31]>>,
- CCIfType<[i64, f64, v2i32, v2f32, v3i32, v3f32, v4i32, v4f32, v5i32, v5f32, v8i32, v8f32, v16i32, v16f32, v2i64, v2f64, v4i16, v4f16], CCCustom<"allocateVGPRTuple">>,
CCIfType<[i32, f32, v2i16, v2f16, i16, f16, i1], CCAssignToStack<4, 4>>,
CCIfType<[i64, f64, v2i32, v2f32], CCAssignToStack<8, 4>>,
CCIfType<[v3i32, v3f32], CCAssignToStack<12, 4>>,
VGPR8, VGPR9, VGPR10, VGPR11, VGPR12, VGPR13, VGPR14, VGPR15,
VGPR16, VGPR17, VGPR18, VGPR19, VGPR20, VGPR21, VGPR22, VGPR23,
VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31]>>,
- CCIfType<[i64, f64, v2i32, v2f32, v4i32, v4f32, v8i32, v8f32, v16i32, v16f32, v2i64, v2f64, v4i16, v4f16], CCCustom<"allocateVGPRTuple">>
]>;
def CC_AMDGPU : CallingConv<[
#include "llvm/Support/KnownBits.h"
using namespace llvm;
-static bool allocateCCRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
- CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State,
- const TargetRegisterClass *RC,
- unsigned NumRegs) {
- ArrayRef<MCPhysReg> RegList = makeArrayRef(RC->begin(), NumRegs);
- unsigned RegResult = State.AllocateReg(RegList);
- if (RegResult == AMDGPU::NoRegister)
- return false;
-
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, RegResult, LocVT, LocInfo));
- return true;
-}
-
-static bool allocateSGPRTuple(unsigned ValNo, MVT ValVT, MVT LocVT,
- CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
- switch (LocVT.SimpleTy) {
- case MVT::i64:
- case MVT::f64:
- case MVT::v2i32:
- case MVT::v2f32:
- case MVT::v4i16:
- case MVT::v4f16: {
- // Up to SGPR0-SGPR105
- return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
- &AMDGPU::SGPR_64RegClass, 53);
- }
- default:
- return false;
- }
-}
-
-// Allocate up to VGPR31.
-//
-// TODO: Since there are no VGPR alignent requirements would it be better to
-// split into individual scalar registers?
-static bool allocateVGPRTuple(unsigned ValNo, MVT ValVT, MVT LocVT,
- CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
- switch (LocVT.SimpleTy) {
- case MVT::i64:
- case MVT::f64:
- case MVT::v2i32:
- case MVT::v2f32:
- case MVT::v4i16:
- case MVT::v4f16: {
- return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
- &AMDGPU::VReg_64RegClass, 31);
- }
- case MVT::v4i32:
- case MVT::v4f32:
- case MVT::v2i64:
- case MVT::v2f64: {
- return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
- &AMDGPU::VReg_128RegClass, 29);
- }
- case MVT::v8i32:
- case MVT::v8f32: {
- return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
- &AMDGPU::VReg_256RegClass, 25);
-
- }
- case MVT::v16i32:
- case MVT::v16f32: {
- return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
- &AMDGPU::VReg_512RegClass, 17);
-
- }
- default:
- return false;
- }
-}
-
#include "AMDGPUGenCallingConv.inc"
// Find a larger type to do a load / store of a vector with.
MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
- // TODO: Consider splitting all arguments into 32-bit pieces.
- if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
+ if (CC == CallingConv::AMDGPU_KERNEL)
+ return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
+
+ if (VT.isVector()) {
EVT ScalarVT = VT.getScalarType();
unsigned Size = ScalarVT.getSizeInBits();
if (Size == 32)
return ScalarVT.getSimpleVT();
- if (Size == 64)
+ if (Size > 32)
return MVT::i32;
if (Size == 16 && Subtarget->has16BitInsts())
return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
- }
+ } else if (VT.getSizeInBits() > 32)
+ return MVT::i32;
return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}
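For non-kernel calling conventions the rule above collapses anything wider than 32 bits to i32 pieces and, when the subtarget has 16-bit instructions, packs 16-bit elements into v2i16/v2f16 pairs. A minimal standalone sketch of that selection (plain C++, not the LLVM API; the enum and helper names are illustrative only):

// Standalone sketch of the register-type choice above; not the LLVM API.
#include <cassert>

enum class RegTy { I32, V2I16, V2F16, Native32 };

// ScalarBits mirrors ScalarVT.getSizeInBits(); Has16BitInsts mirrors
// Subtarget->has16BitInsts().
static RegTy pickRegisterType(unsigned ScalarBits, bool IsFloat,
                              bool Has16BitInsts) {
  if (ScalarBits > 32)
    return RegTy::I32;                            // split into 32-bit pieces
  if (ScalarBits == 16 && Has16BitInsts)
    return IsFloat ? RegTy::V2F16 : RegTy::V2I16; // pack pairs of 16-bit elements
  return RegTy::Native32;                         // 32-bit values pass through
}

int main() {
  assert(pickRegisterType(64, false, true) == RegTy::I32);  // i64 -> i32 pieces
  assert(pickRegisterType(16, true, true) == RegTy::V2F16); // f16 vector -> packed pairs
  return 0;
}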
unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
- if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
+ if (CC == CallingConv::AMDGPU_KERNEL)
+ return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
+
+ if (VT.isVector()) {
unsigned NumElts = VT.getVectorNumElements();
EVT ScalarVT = VT.getScalarType();
unsigned Size = ScalarVT.getSizeInBits();
if (Size == 32)
return NumElts;
- if (Size == 64)
- return 2 * NumElts;
+ if (Size > 32)
+ return NumElts * ((Size + 31) / 32);
if (Size == 16 && Subtarget->has16BitInsts())
- return (VT.getVectorNumElements() + 1) / 2;
- }
+ return (NumElts + 1) / 2;
+ } else if (VT.getSizeInBits() > 32)
+ return (VT.getSizeInBits() + 31) / 32;
return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}
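Worked examples of the count above, assuming a subtarget with 16-bit instructions: an i128 scalar takes (128 + 31) / 32 = 4 registers, a v3i64 takes 3 * ((64 + 31) / 32) = 6, and a v4f16 packs into (4 + 1) / 2 = 2 v2f16 registers.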
return NumIntermediates;
}
- if (Size == 64) {
+ if (Size > 32) {
RegisterVT = MVT::i32;
IntermediateVT = RegisterVT;
- NumIntermediates = 2 * NumElts;
+ NumIntermediates = NumElts * ((Size + 31) / 32);
return NumIntermediates;
}
+; XFAIL: *
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=irtranslator -global-isel %s -o - | FileCheck %s
}
; GCN-LABEL: {{^}}test_call_external_void_func_i64_imm:
-; GCN-DAG: s_movk_i32 [[K0:s[0-9]+]], 0x7b{{$}}
-; GCN-DAG: s_mov_b32 [[K1:s[0-9]+]], 0{{$}}
-; GCN-DAG: v_mov_b32_e32 v0, [[K0]]
+; GCN-DAG: v_mov_b32_e32 v0, 0x7b{{$}}
+; GCN-DAG: v_mov_b32_e32 v1, 0{{$}}
; GCN-DAG: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
; GCN-DAG: s_add_u32 s[[PC_LO]], s[[PC_LO]], external_void_func_i64@rel32@lo+4
; GCN-DAG: s_addc_u32 s[[PC_HI]], s[[PC_HI]], external_void_func_i64@rel32@hi+4
-; GCN-DAG: v_mov_b32_e32 v1, [[K1]]
; GCN: s_swappc_b64 s[30:31], s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @test_call_external_void_func_i64_imm() #0 {
}
; GCN-LABEL: {{^}}stack_passed_arg_alignment_v32i32_f64:
-; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s32 offset:4
; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s32{{$}}
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s32 offset:4
; GCN: s_swappc_b64
define amdgpu_kernel void @stack_passed_arg_alignment_v32i32_f64(<32 x i32> %val, double %tmp) #0 {
entry:
; GCN-NOT: s32
; GCN: buffer_store_dword v32, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GCN: buffer_store_dword v33, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; GCN: buffer_load_dword v32, off, s[0:3], s32{{$}}
-; GCN: buffer_load_dword v33, off, s[0:3], s32 offset:4
+; GCN: buffer_load_dword v32, off, s[0:3], s32 offset:4
+; GCN: buffer_load_dword v33, off, s[0:3], s32{{$}}
; GCN: s_getpc_b64
-; GCN: buffer_store_dword v32, off, s[0:3], s32{{$}}
-; GCN: buffer_store_dword v33, off, s[0:3], s32 offset:4
+; GCN: buffer_store_dword v33, off, s[0:3], s32{{$}}
+; GCN: buffer_store_dword v32, off, s[0:3], s32 offset:4
; GCN: buffer_load_dword v33, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
; GCN: buffer_load_dword v32, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
; GCN-NOT: s32
define <4 x float> @Scene_transformT(i32 %subshapeIdx, <4 x float> %v, float %time, i8 addrspace(1)* %gScene, i32 addrspace(1)* %gSceneOffsets) local_unnamed_addr !dbg !110 {
entry:
-; CHECK: ;DEBUG_VALUE: Scene_transformT:gScene <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] $vgpr6_vgpr7
+ ; CHECK: v_mov_b32_e32 v[[COPIED_ARG_PIECE:[0-9]+]], v9
+
+ ; CHECK: ;DEBUG_VALUE: Scene_transformT:gScene <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef, DW_OP_LLVM_fragment 0 32] $vgpr6
+ ; CHECK: ;DEBUG_VALUE: Scene_transformT:gScene <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef, DW_OP_LLVM_fragment 32 32] $vgpr7
call void @llvm.dbg.value(metadata i8 addrspace(1)* %gScene, metadata !120, metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !154
-; CHECK: ;DEBUG_VALUE: Scene_transformT:gSceneOffsets <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] $vgpr8_vgpr9
+ ; CHECK: ;DEBUG_VALUE: Scene_transformT:gSceneOffsets <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef, DW_OP_LLVM_fragment 0 32] $vgpr8
+ ; CHECK: ;DEBUG_VALUE: Scene_transformT:gSceneOffsets <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef, DW_OP_LLVM_fragment 32 32] $vgpr[[COPIED_ARG_PIECE]]
call void @llvm.dbg.value(metadata i32 addrspace(1)* %gSceneOffsets, metadata !121, metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !155
%call = tail call %struct.ShapeData addrspace(1)* @Scene_getSubShapeData(i32 %subshapeIdx, i8 addrspace(1)* %gScene, i32 addrspace(1)* %gSceneOffsets)
%m_linearMotion = getelementptr inbounds %struct.ShapeData, %struct.ShapeData addrspace(1)* %call, i64 0, i32 2
; RUN: llc -march=amdgcn -stop-after=amdgpu-isel -verify-machineinstrs -o - %s | FileCheck %s
; CHECK-LABEL: vcopy_i1_undef
-; CHECK: IMPLICIT_DEF
-; CHECK-NOT: COPY
-; CHECK: IMPLICIT_DEF
+; CHECK: [[IMPDEF0:%[0-9]+]]:vreg_1 = IMPLICIT_DEF
; CHECK-NOT: COPY
+; CHECK: [[IMPDEF1:%[0-9]+]]:vreg_1 = IMPLICIT_DEF
+; CHECK-NOT: COPY [[IMPDEF0]]
+; CHECK-NOT: COPY [[IMPDEF1]]
; CHECK: .false:
define <2 x float> @vcopy_i1_undef(<2 x float> addrspace(1)* %p) {
entry:
; GCN-LABEL: {{^}}v2i16_to_i64:
; GFX9: s_waitcnt
-; GFX9-NEXT: v_pk_add_u16 v0, v0, v1
-; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_pk_add_u16 v1, v0, v1
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; GFX9-NEXT: s_setpc_b64
define i64 @v2i16_to_i64(<2 x i16> %x, <2 x i16> %y) {
%x.add = add <2 x i16> %x, %y
; GCN-NEXT: v_lshl_b64 v[5:6], v[0:1], v5
; GCN-NEXT: v_cmp_gt_u32_e32 vcc, 64, v4
; GCN-NEXT: v_lshl_b64 v[0:1], v[0:1], v4
-; GCN-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
; GCN-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
-; GCN-NEXT: v_cndmask_b32_e64 v3, v6, v3, s[4:5]
; GCN-NEXT: v_cndmask_b32_e64 v2, v5, v2, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v5, v6, v8, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v3, v5, v3, s[4:5]
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
%shl = shl i128 %lhs, %rhs
ret i128 %shl
; GCN-NEXT: v_lshr_b64 v[5:6], v[2:3], v5
; GCN-NEXT: v_cmp_gt_u32_e32 vcc, 64, v4
; GCN-NEXT: v_lshr_b64 v[2:3], v[2:3], v4
-; GCN-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
; GCN-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
-; GCN-NEXT: v_cndmask_b32_e64 v1, v6, v1, s[4:5]
; GCN-NEXT: v_cndmask_b32_e64 v0, v5, v0, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v5, v6, v8, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, v5, v1, s[4:5]
; GCN-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
%shl = lshr i128 %lhs, %rhs
; GCN-LABEL: v_ashr_i128_vv:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_sub_i32_e32 v9, vcc, 64, v4
-; GCN-NEXT: v_lshr_b64 v[7:8], v[0:1], v4
-; GCN-NEXT: v_lshl_b64 v[9:10], v[2:3], v9
-; GCN-NEXT: v_ashrrev_i32_e32 v11, 31, v3
-; GCN-NEXT: v_or_b32_e32 v8, v8, v10
-; GCN-NEXT: v_subrev_i32_e32 v10, vcc, 64, v4
-; GCN-NEXT: v_ashr_i64 v[5:6], v[2:3], v4
-; GCN-NEXT: v_ashr_i64 v[2:3], v[2:3], v10
-; GCN-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v4
-; GCN-NEXT: v_or_b32_e32 v7, v7, v9
-; GCN-NEXT: v_cndmask_b32_e64 v5, v11, v5, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e64 v3, v3, v8, s[4:5]
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
-; GCN-NEXT: v_cndmask_b32_e64 v2, v2, v7, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e64 v6, v11, v6, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; GCN-NEXT: v_mov_b32_e32 v2, v5
-; GCN-NEXT: v_mov_b32_e32 v3, v6
+; GCN-NEXT: v_sub_i32_e32 v7, vcc, 64, v4
+; GCN-NEXT: v_lshr_b64 v[5:6], v[0:1], v4
+; GCN-NEXT: v_lshl_b64 v[7:8], v[2:3], v7
+; GCN-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v4
+; GCN-NEXT: v_or_b32_e32 v7, v5, v7
+; GCN-NEXT: v_subrev_i32_e32 v5, vcc, 64, v4
+; GCN-NEXT: v_or_b32_e32 v8, v6, v8
+; GCN-NEXT: v_ashr_i64 v[5:6], v[2:3], v5
+; GCN-NEXT: v_cmp_gt_u32_e32 vcc, 64, v4
+; GCN-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v0, v5, v0, s[4:5]
+; GCN-NEXT: v_cndmask_b32_e32 v5, v6, v8, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, v5, v1, s[4:5]
+; GCN-NEXT: v_ashr_i64 v[4:5], v[2:3], v4
+; GCN-NEXT: v_ashrrev_i32_e32 v3, 31, v3
+; GCN-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
%shl = ashr i128 %lhs, %rhs
ret i128 %shl
; GCN-LABEL: v_shl_i128_vk:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_lshl_b64 v[2:3], v[2:3], 17
; GCN-NEXT: v_lshrrev_b32_e32 v4, 15, v1
-; GCN-NEXT: v_lshl_b64 v[0:1], v[0:1], 17
-; GCN-NEXT: v_or_b32_e32 v2, v2, v4
+; GCN-NEXT: v_lshlrev_b32_e32 v5, 17, v2
+; GCN-NEXT: v_or_b32_e32 v4, v5, v4
+; GCN-NEXT: v_alignbit_b32 v1, v1, v0, 15
+; GCN-NEXT: v_alignbit_b32 v3, v3, v2, 15
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 17, v0
+; GCN-NEXT: v_mov_b32_e32 v2, v4
; GCN-NEXT: s_setpc_b64 s[30:31]
%shl = shl i128 %lhs, 17
ret i128 %shl
; GCN-LABEL: v_lshr_i128_vk:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_lshr_b64 v[0:1], v[2:3], 1
+; GCN-NEXT: v_alignbit_b32 v0, v3, v2, 1
+; GCN-NEXT: v_lshrrev_b32_e32 v1, 1, v3
; GCN-NEXT: v_mov_b32_e32 v2, 0
; GCN-NEXT: v_mov_b32_e32 v3, 0
; GCN-NEXT: s_setpc_b64 s[30:31]
; GCN-LABEL: v_ashr_i128_vk:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_lshl_b64 v[4:5], v[2:3], 31
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 1, v1
-; GCN-NEXT: v_or_b32_e32 v4, v0, v4
-; GCN-NEXT: v_mov_b32_e32 v0, v4
-; GCN-NEXT: v_ashr_i64 v[2:3], v[2:3], 33
-; GCN-NEXT: v_mov_b32_e32 v1, v5
+; GCN-NEXT: v_ashr_i64 v[4:5], v[2:3], 33
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 31, v2
+; GCN-NEXT: v_lshrrev_b32_e32 v1, 1, v1
+; GCN-NEXT: v_or_b32_e32 v0, v1, v0
+; GCN-NEXT: v_alignbit_b32 v1, v3, v2, 1
+; GCN-NEXT: v_mov_b32_e32 v2, v4
+; GCN-NEXT: v_mov_b32_e32 v3, v5
; GCN-NEXT: s_setpc_b64 s[30:31]
%shl = ashr i128 %lhs, 33
ret i128 %shl
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_sub_i32_e32 v1, vcc, 64, v0
-; GCN-NEXT: v_subrev_i32_e32 v3, vcc, 64, v0
-; GCN-NEXT: v_lshr_b64 v[1:2], 17, v1
-; GCN-NEXT: v_lshl_b64 v[4:5], 17, v3
+; GCN-NEXT: v_lshr_b64 v[2:3], 17, v1
+; GCN-NEXT: v_subrev_i32_e32 v1, vcc, 64, v0
+; GCN-NEXT: v_lshl_b64 v[4:5], 17, v1
; GCN-NEXT: v_cmp_gt_u32_e32 vcc, 64, v0
-; GCN-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v1, v4, v2, vcc
; GCN-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v0
-; GCN-NEXT: v_cndmask_b32_e64 v3, 0, v2, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e32 v2, v4, v1, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v2, 0, v1, s[4:5]
; GCN-NEXT: v_lshl_b64 v[0:1], 17, v0
-; GCN-NEXT: v_cndmask_b32_e64 v2, 0, v2, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v3, 0, v3, s[4:5]
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
%shl = shl i128 17, %rhs
ret i128 %shl
; GCN-LABEL: v_lshr_i128_kv:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s7, 0
-; GCN-NEXT: s_movk_i32 s6, 0x41
-; GCN-NEXT: v_lshr_b64 v[2:3], s[6:7], v0
+; GCN-NEXT: s_movk_i32 s4, 0x41
+; GCN-NEXT: s_mov_b32 s5, 0
+; GCN-NEXT: v_lshr_b64 v[1:2], s[4:5], v0
; GCN-NEXT: v_cmp_gt_u32_e32 vcc, 64, v0
+; GCN-NEXT: v_mov_b32_e32 v3, s4
; GCN-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v0
-; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v2, vcc
-; GCN-NEXT: v_mov_b32_e32 v2, s6
-; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v3, vcc
-; GCN-NEXT: v_cndmask_b32_e64 v0, v2, v0, s[4:5]
+; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GCN-NEXT: s_and_b64 vcc, s[4:5], vcc
+; GCN-NEXT: v_cndmask_b32_e64 v0, v3, v1, s[4:5]
+; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v2, vcc
; GCN-NEXT: v_mov_b32_e32 v2, 0
-; GCN-NEXT: v_cndmask_b32_e64 v1, 0, v1, s[4:5]
; GCN-NEXT: v_mov_b32_e32 v3, 0
; GCN-NEXT: s_setpc_b64 s[30:31]
%shl = lshr i128 65, %rhs
; GCN-LABEL: v_ashr_i128_kv:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_lshr_b64 v[2:3], 33, v0
+; GCN-NEXT: v_lshr_b64 v[1:2], 33, v0
; GCN-NEXT: v_cmp_gt_u32_e32 vcc, 64, v0
; GCN-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v0
-; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v3, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v2, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GCN-NEXT: s_and_b64 vcc, s[4:5], vcc
+; GCN-NEXT: v_cndmask_b32_e64 v0, 33, v1, s[4:5]
+; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v2, vcc
; GCN-NEXT: v_mov_b32_e32 v2, 0
-; GCN-NEXT: v_cndmask_b32_e64 v1, 0, v1, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e64 v0, 33, v0, s[4:5]
; GCN-NEXT: v_mov_b32_e32 v3, 0
; GCN-NEXT: s_setpc_b64 s[30:31]
%shl = ashr i128 33, %rhs
; GCN-NEXT: v_lshl_b64 v[16:17], v[0:1], v9
; GCN-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
-; GCN-NEXT: v_cndmask_b32_e64 v9, v17, v19, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc
; GCN-NEXT: v_cndmask_b32_e64 v9, v16, v18, s[4:5]
+; GCN-NEXT: v_sub_i32_e64 v16, s[6:7], 64, v12
+; GCN-NEXT: v_cndmask_b32_e64 v11, v17, v19, s[4:5]
; GCN-NEXT: v_cndmask_b32_e32 v2, v9, v2, vcc
-; GCN-NEXT: v_sub_i32_e32 v11, vcc, 64, v12
; GCN-NEXT: v_lshl_b64 v[9:10], v[6:7], v12
-; GCN-NEXT: v_lshr_b64 v[16:17], v[4:5], v11
-; GCN-NEXT: v_cmp_gt_u64_e64 s[6:7], 64, v[12:13]
-; GCN-NEXT: v_or_b32_e32 v16, v9, v16
+; GCN-NEXT: v_lshr_b64 v[16:17], v[4:5], v16
; GCN-NEXT: v_cmp_eq_u64_e64 s[8:9], 0, v[14:15]
+; GCN-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
+; GCN-NEXT: v_or_b32_e32 v16, v9, v16
+; GCN-NEXT: v_cmp_gt_u64_e64 s[6:7], 64, v[12:13]
; GCN-NEXT: v_subrev_i32_e32 v9, vcc, 64, v12
; GCN-NEXT: v_or_b32_e32 v11, v10, v17
; GCN-NEXT: v_lshl_b64 v[9:10], v[4:5], v9
+; GCN-NEXT: v_or_b32_e32 v15, v13, v15
+; GCN-NEXT: v_or_b32_e32 v14, v12, v14
; GCN-NEXT: s_and_b64 vcc, s[8:9], s[6:7]
-; GCN-NEXT: v_cndmask_b32_e32 v17, v10, v11, vcc
-; GCN-NEXT: v_or_b32_e32 v11, v13, v15
-; GCN-NEXT: v_or_b32_e32 v10, v12, v14
+; GCN-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
; GCN-NEXT: v_lshl_b64 v[0:1], v[0:1], v8
; GCN-NEXT: v_lshl_b64 v[4:5], v[4:5], v12
-; GCN-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[10:11]
; GCN-NEXT: v_cndmask_b32_e32 v9, v9, v16, vcc
-; GCN-NEXT: v_cndmask_b32_e64 v7, v17, v7, s[6:7]
; GCN-NEXT: v_cndmask_b32_e64 v6, v9, v6, s[6:7]
-; GCN-NEXT: v_cndmask_b32_e64 v1, 0, v1, s[4:5]
+; GCN-NEXT: v_cndmask_b32_e32 v9, v10, v11, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v7, v9, v7, s[6:7]
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, v0, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, 0, v1, s[4:5]
; GCN-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
%shl = shl <2 x i128> %lhs, %rhs
ret <2 x i128> %shl
; GCN-NEXT: v_lshr_b64 v[16:17], v[2:3], v9
; GCN-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
-; GCN-NEXT: v_cndmask_b32_e64 v9, v17, v19, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
; GCN-NEXT: v_cndmask_b32_e64 v9, v16, v18, s[4:5]
+; GCN-NEXT: v_sub_i32_e64 v16, s[6:7], 64, v12
+; GCN-NEXT: v_cndmask_b32_e64 v11, v17, v19, s[4:5]
; GCN-NEXT: v_cndmask_b32_e32 v0, v9, v0, vcc
-; GCN-NEXT: v_sub_i32_e32 v11, vcc, 64, v12
; GCN-NEXT: v_lshr_b64 v[9:10], v[4:5], v12
-; GCN-NEXT: v_lshl_b64 v[16:17], v[6:7], v11
-; GCN-NEXT: v_cmp_gt_u64_e64 s[6:7], 64, v[12:13]
-; GCN-NEXT: v_or_b32_e32 v16, v9, v16
+; GCN-NEXT: v_lshl_b64 v[16:17], v[6:7], v16
; GCN-NEXT: v_cmp_eq_u64_e64 s[8:9], 0, v[14:15]
+; GCN-NEXT: v_cndmask_b32_e32 v1, v11, v1, vcc
+; GCN-NEXT: v_or_b32_e32 v16, v9, v16
+; GCN-NEXT: v_cmp_gt_u64_e64 s[6:7], 64, v[12:13]
; GCN-NEXT: v_subrev_i32_e32 v9, vcc, 64, v12
; GCN-NEXT: v_or_b32_e32 v11, v10, v17
; GCN-NEXT: v_lshr_b64 v[9:10], v[6:7], v9
+; GCN-NEXT: v_or_b32_e32 v15, v13, v15
+; GCN-NEXT: v_or_b32_e32 v14, v12, v14
; GCN-NEXT: s_and_b64 vcc, s[8:9], s[6:7]
-; GCN-NEXT: v_cndmask_b32_e32 v17, v10, v11, vcc
-; GCN-NEXT: v_or_b32_e32 v11, v13, v15
-; GCN-NEXT: v_or_b32_e32 v10, v12, v14
+; GCN-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
; GCN-NEXT: v_lshr_b64 v[2:3], v[2:3], v8
; GCN-NEXT: v_lshr_b64 v[6:7], v[6:7], v12
-; GCN-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[10:11]
; GCN-NEXT: v_cndmask_b32_e32 v9, v9, v16, vcc
-; GCN-NEXT: v_cndmask_b32_e64 v5, v17, v5, s[6:7]
; GCN-NEXT: v_cndmask_b32_e64 v4, v9, v4, s[6:7]
-; GCN-NEXT: v_cndmask_b32_e64 v3, 0, v3, s[4:5]
+; GCN-NEXT: v_cndmask_b32_e32 v9, v10, v11, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v5, v9, v5, s[6:7]
; GCN-NEXT: v_cndmask_b32_e64 v2, 0, v2, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e32 v7, 0, v7, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v3, 0, v3, s[4:5]
; GCN-NEXT: v_cndmask_b32_e32 v6, 0, v6, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v7, 0, v7, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
%shl = lshr <2 x i128> %lhs, %rhs
ret <2 x i128> %shl
; GCN-NEXT: v_ashr_i64 v[16:17], v[2:3], v9
; GCN-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
-; GCN-NEXT: v_cndmask_b32_e64 v9, v17, v19, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
; GCN-NEXT: v_cndmask_b32_e64 v9, v16, v18, s[4:5]
+; GCN-NEXT: v_sub_i32_e64 v16, s[6:7], 64, v12
+; GCN-NEXT: v_cndmask_b32_e64 v11, v17, v19, s[4:5]
; GCN-NEXT: v_cndmask_b32_e32 v0, v9, v0, vcc
-; GCN-NEXT: v_sub_i32_e32 v11, vcc, 64, v12
; GCN-NEXT: v_lshr_b64 v[9:10], v[4:5], v12
-; GCN-NEXT: v_lshl_b64 v[16:17], v[6:7], v11
-; GCN-NEXT: v_cmp_gt_u64_e64 s[6:7], 64, v[12:13]
-; GCN-NEXT: v_or_b32_e32 v16, v9, v16
+; GCN-NEXT: v_lshl_b64 v[16:17], v[6:7], v16
; GCN-NEXT: v_cmp_eq_u64_e64 s[8:9], 0, v[14:15]
+; GCN-NEXT: v_cndmask_b32_e32 v1, v11, v1, vcc
+; GCN-NEXT: v_or_b32_e32 v16, v9, v16
+; GCN-NEXT: v_cmp_gt_u64_e64 s[6:7], 64, v[12:13]
; GCN-NEXT: v_subrev_i32_e32 v9, vcc, 64, v12
; GCN-NEXT: v_or_b32_e32 v11, v10, v17
; GCN-NEXT: v_ashr_i64 v[9:10], v[6:7], v9
+; GCN-NEXT: v_or_b32_e32 v15, v13, v15
+; GCN-NEXT: v_or_b32_e32 v14, v12, v14
; GCN-NEXT: s_and_b64 vcc, s[8:9], s[6:7]
-; GCN-NEXT: v_cndmask_b32_e32 v17, v10, v11, vcc
-; GCN-NEXT: v_or_b32_e32 v11, v13, v15
-; GCN-NEXT: v_or_b32_e32 v10, v12, v14
-; GCN-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[10:11]
+; GCN-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
; GCN-NEXT: v_cndmask_b32_e32 v9, v9, v16, vcc
; GCN-NEXT: v_cndmask_b32_e64 v4, v9, v4, s[6:7]
+; GCN-NEXT: v_cndmask_b32_e32 v9, v10, v11, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v5, v9, v5, s[6:7]
; GCN-NEXT: v_ashr_i64 v[8:9], v[2:3], v8
-; GCN-NEXT: v_ashrrev_i32_e32 v2, 31, v3
-; GCN-NEXT: v_cndmask_b32_e64 v3, v2, v9, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e64 v2, v2, v8, s[4:5]
+; GCN-NEXT: v_ashrrev_i32_e32 v3, 31, v3
+; GCN-NEXT: v_cndmask_b32_e64 v2, v3, v8, s[4:5]
+; GCN-NEXT: v_cndmask_b32_e64 v3, v3, v9, s[4:5]
; GCN-NEXT: v_ashr_i64 v[8:9], v[6:7], v12
-; GCN-NEXT: v_ashrrev_i32_e32 v6, 31, v7
-; GCN-NEXT: v_cndmask_b32_e32 v7, v6, v9, vcc
-; GCN-NEXT: v_cndmask_b32_e64 v5, v17, v5, s[6:7]
-; GCN-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
+; GCN-NEXT: v_ashrrev_i32_e32 v7, 31, v7
+; GCN-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
%shl = ashr <2 x i128> %lhs, %rhs
ret <2 x i128> %shl
; CHECK-LABEL: {{^}}main:
; CHECK: s_wqm
-; CHECK: s_load_dwordx4
; CHECK: s_load_dwordx8
+; CHECK: s_load_dwordx4
; CHECK: s_waitcnt lgkmcnt(0)
; CHECK: image_sample
; CHECK: s_waitcnt vmcnt(0)
; GCN-NEXT: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: .Ltmp10:
-; GCN-NEXT: ;DEBUG_VALUE: split_f64_arg:arg <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] $vgpr0_vgpr1
-; GCN-NEXT: ;DEBUG_VALUE: split_f64_arg:arg <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] $vgpr0_vgpr1
-; GCN-NEXT: ;DEBUG_VALUE: split_f64_arg:arg <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] $vgpr0_vgpr1
+; GCN-NEXT: ;DEBUG_VALUE: split_f64_arg:arg <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef, DW_OP_LLVM_fragment 32 32] $vgpr1
+; GCN-NEXT: ;DEBUG_VALUE: split_f64_arg:arg <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef, DW_OP_LLVM_fragment 0 32] $vgpr0
; GCN-NEXT: .loc 0 16 5 prologue_end ; /tmp/dbg.cl:16:5
; GCN-NEXT: s_setpc_b64 s[30:31]
; GCN-NEXT: .Ltmp11:
; GCN-NEXT: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: .Ltmp14:
-; GCN-NEXT: ;DEBUG_VALUE: split_i64_arg:arg <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] $vgpr0_vgpr1
-; GCN-NEXT: ;DEBUG_VALUE: split_i64_arg:arg <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] $vgpr0_vgpr1
-; GCN-NEXT: ;DEBUG_VALUE: split_i64_arg:arg <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] $vgpr0_vgpr1
+; GCN-NEXT: ;DEBUG_VALUE: split_i64_arg:arg <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef, DW_OP_LLVM_fragment 32 32] $vgpr1
+; GCN-NEXT: ;DEBUG_VALUE: split_i64_arg:arg <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef, DW_OP_LLVM_fragment 0 32] $vgpr0
; GCN-NEXT: .loc 0 24 5 prologue_end ; /tmp/dbg.cl:24:5
; GCN-NEXT: s_setpc_b64 s[30:31]
; GCN-NEXT: .Ltmp15:
; GCN-NEXT: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: .Ltmp16:
-; GCN-NEXT: ;DEBUG_VALUE: split_ptr_arg:arg <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] $vgpr0_vgpr1
-; GCN-NEXT: ;DEBUG_VALUE: split_ptr_arg:arg <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] $vgpr0_vgpr1
-; GCN-NEXT: ;DEBUG_VALUE: split_ptr_arg:arg <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] $vgpr0_vgpr1
+; GCN-NEXT: ;DEBUG_VALUE: split_ptr_arg:arg <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef, DW_OP_LLVM_fragment 32 32] $vgpr1
+; GCN-NEXT: ;DEBUG_VALUE: split_ptr_arg:arg <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef, DW_OP_LLVM_fragment 0 32] $vgpr0
; GCN-NEXT: .loc 0 28 5 prologue_end ; /tmp/dbg.cl:28:5
; GCN-NEXT: s_setpc_b64 s[30:31]
; GCN-NEXT: .Ltmp17:
; GFX9-LABEL: shuffle_v4f16_234u:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
-; GFX9-NEXT: global_load_dwordx2 v[1:2], v[2:3], off
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v[2:3], off
+; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v1, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
%val1 = load <4 x half>, <4 x half> addrspace(1)* %arg1
; GFX9-LABEL: shuffle_v4f16_3uu7:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_load_dword v2, v[2:3], off offset:4
; GFX9-NEXT: global_load_dword v0, v[0:1], off offset:4
-; GFX9-NEXT: global_load_dword v1, v[2:3], off offset:4
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v2
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
; GFX9-LABEL: shuffle_v4f16_0145:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v[2:3], off
; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[1:2], v[2:3], off
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
%val1 = load <4 x half>, <4 x half> addrspace(1)* %arg1
; GFX9-LABEL: shuffle_v4f16_0167:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v[2:3], off
; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[1:2], v[2:3], off
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v1, v2
+; GFX9-NEXT: v_mov_b32_e32 v1, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
%val1 = load <4 x half>, <4 x half> addrspace(1)* %arg1
; GFX9-LABEL: shuffle_v4f16_2345:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
-; GFX9-NEXT: global_load_dwordx2 v[1:2], v[2:3], off
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v[2:3], off
+; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v1, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
%val1 = load <4 x half>, <4 x half> addrspace(1)* %arg1
; GFX9-LABEL: shuffle_v4f16_2367:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
-; GFX9-NEXT: global_load_dwordx2 v[0:1], v[2:3], off
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v[2:3], off
+; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-NEXT: v_mov_b32_e32 v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v1, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
%val1 = load <4 x half>, <4 x half> addrspace(1)* %arg1
; GFX9-LABEL: shuffle_v4f16_4501:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
-; GFX9-NEXT: global_load_dwordx2 v[0:1], v[2:3], off
+; GFX9-NEXT: global_load_dwordx2 v[3:4], v[2:3], off
+; GFX9-NEXT: global_load_dwordx2 v[1:2], v[0:1], off
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v1, v4
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
%val1 = load <4 x half>, <4 x half> addrspace(1)* %arg1
; GFX9-LABEL: shuffle_v4f16_4523:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
; GFX9-NEXT: global_load_dwordx2 v[2:3], v[2:3], off
+; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-LABEL: shuffle_v4f16_6701:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
-; GFX9-NEXT: global_load_dwordx2 v[0:1], v[2:3], off
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v[2:3], off
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: global_load_dwordx2 v[1:2], v[0:1], off
+; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v0, v1
-; GFX9-NEXT: v_mov_b32_e32 v1, v4
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
%val1 = load <4 x half>, <4 x half> addrspace(1)* %arg1
; GFX9-LABEL: shuffle_v4f16_6723:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
; GFX9-NEXT: global_load_dwordx2 v[2:3], v[2:3], off
+; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-LABEL: shuffle_v4f16_2356:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v[2:3], off
; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
-; GFX9-NEXT: global_load_dwordx2 v[0:1], v[2:3], off
-; GFX9-NEXT: v_mov_b32_e32 v2, 0xffff
+; GFX9-NEXT: v_mov_b32_e32 v6, 0xffff
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_and_b32_sdwa v0, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshl_or_b32 v1, v3, 16, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_and_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX9-NEXT: v_lshl_or_b32 v1, v1, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
; GFX9-LABEL: shuffle_v4f16_5623:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
; GFX9-NEXT: global_load_dwordx2 v[2:3], v[2:3], off
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v0, 0xffff
+; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
+; GFX9-NEXT: v_mov_b32_e32 v4, 0xffff
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_and_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_and_b32_sdwa v0, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v3, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
; GFX9-LABEL: shuffle_v4f16_5734:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v0, v[0:1], off offset:4
-; GFX9-NEXT: global_load_dwordx2 v[1:2], v[2:3], off
-; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
+; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:4
+; GFX9-NEXT: global_load_dwordx2 v[0:1], v[2:3], off
+; GFX9-NEXT: v_mov_b32_e32 v2, 0xffff
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_and_b32_sdwa v4, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_and_b32_sdwa v3, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX9-NEXT: v_and_b32_sdwa v2, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v2
-; GFX9-NEXT: v_lshl_or_b32 v1, v1, 16, v4
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v1
+; GFX9-NEXT: v_and_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshl_or_b32 v1, v0, 16, v3
+; GFX9-NEXT: v_lshl_or_b32 v0, v4, 16, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
%val1 = load <4 x half>, <4 x half> addrspace(1)* %arg1
; GFX9-LABEL: shuffle_v4i16_2356:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v[2:3], off
; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
-; GFX9-NEXT: global_load_dwordx2 v[0:1], v[2:3], off
-; GFX9-NEXT: v_mov_b32_e32 v2, 0xffff
+; GFX9-NEXT: v_mov_b32_e32 v6, 0xffff
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_and_b32_sdwa v0, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_lshl_or_b32 v1, v3, 16, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_and_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX9-NEXT: v_lshl_or_b32 v1, v1, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <4 x i16>, <4 x i16> addrspace(1)* %arg0
; GFX9-LABEL: shuffle_v4i16_0167:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v[2:3], off
; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[1:2], v[2:3], off
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v1, v2
+; GFX9-NEXT: v_mov_b32_e32 v1, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <4 x i16>, <4 x i16> addrspace(1)* %arg0
%val1 = load <4 x i16>, <4 x i16> addrspace(1)* %arg1
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
+; GFX9-NEXT: v_mov_b32_e32 v2, 0xffff
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff
-; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v3, v1, v0
-; GFX9-NEXT: v_and_b32_e32 v4, v1, v2
-; GFX9-NEXT: v_lshl_or_b32 v1, v0, 16, v3
-; GFX9-NEXT: v_lshl_or_b32 v0, v2, 16, v4
+; GFX9-NEXT: v_and_b32_e32 v1, v2, v0
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX9-NEXT: v_lshl_or_b32 v1, v0, 16, v1
+; GFX9-NEXT: v_and_b32_e32 v0, v2, v3
+; GFX9-NEXT: v_lshl_or_b32 v0, v3, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
%val1 = load <4 x half>, <4 x half> addrspace(1)* %arg1
; GFX9-LABEL: shuffle_v8f16_4589:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_load_dword v2, v[2:3], off
; GFX9-NEXT: global_load_dword v0, v[0:1], off offset:8
-; GFX9-NEXT: global_load_dword v1, v[2:3], off
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_mov_b32_e32 v1, v2
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <8 x half>, <8 x half> addrspace(1)* %arg0
; GFX9-LABEL: shuffle_v8f16_10_11_2_3:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_load_dword v2, v[2:3], off offset:4
; GFX9-NEXT: global_load_dword v1, v[0:1], off offset:4
-; GFX9-NEXT: global_load_dword v0, v[2:3], off offset:4
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_mov_b32_e32 v0, v2
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <8 x half>, <8 x half> addrspace(1)* %arg0
; GFX9-LABEL: shuffle_v8f16_13_14_2_3:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v1, v[0:1], off offset:4
; GFX9-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
-; GFX9-NEXT: v_mov_b32_e32 v0, 0xffff
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_and_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: global_load_dword v1, v[0:1], off offset:4
+; GFX9-NEXT: v_mov_b32_e32 v6, 0xffff
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_and_b32_sdwa v0, v6, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v5, 16, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <8 x half>, <8 x half> addrspace(1)* %arg0
%val1 = load <8 x half>, <8 x half> addrspace(1)* %arg1
; GFX9-LABEL: shuffle_v6f16_452367:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-NEXT: global_load_dword v3, v[2:3], off
; GFX9-NEXT: global_load_dwordx3 v[0:2], v[0:1], off
-; GFX9-NEXT: global_load_dword v3, v[3:4], off
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v0, v2
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, v2
; GFX9-NEXT: v_mov_b32_e32 v2, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <6 x half>, <6 x half> addrspace(1)* %arg0
; GFX9-LABEL: shuffle_v4f16_0456:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v[2:3], off
; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[1:2], v[2:3], off
-; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
-; GFX9-NEXT: v_and_b32_e32 v0, v3, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_and_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX9-NEXT: v_lshl_or_b32 v0, v1, 16, v0
-; GFX9-NEXT: v_lshl_or_b32 v1, v2, 16, v3
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff
+; GFX9-NEXT: v_and_b32_sdwa v4, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_and_b32_e32 v0, v1, v0
+; GFX9-NEXT: v_lshl_or_b32 v0, v2, 16, v0
+; GFX9-NEXT: v_lshl_or_b32 v1, v3, 16, v4
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
%val1 = load <4 x half>, <4 x half> addrspace(1)* %arg1
; DEFAULT-LABEL: {{^}}main:
; DEFAULT: s_load_dwordx4
; DEFAULT: s_load_dwordx4
+; DEFAULT: s_waitcnt lgkmcnt(0)
+; DEFAULT: buffer_load_format_xyzw
+; DEFAULT: s_waitcnt vmcnt(0)
+; DEFAULT: buffer_load_format_xyzw
; DEFAULT: s_waitcnt vmcnt(0)
; DEFAULT: exp
-; DEFAULT: s_waitcnt lgkmcnt(0)
-; DEFAULT: s_endpgm
+; DEFAULT-NEXT: exp
+; DEFAULT-NEXT: s_endpgm
define amdgpu_vs void @main(<16 x i8> addrspace(4)* inreg %arg, <16 x i8> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, <16 x i8> addrspace(4)* inreg %arg3, <16 x i8> addrspace(4)* inreg %arg4, i32 inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9, float addrspace(4)* inreg %constptr) #0 {
main_body:
%tmp = getelementptr <16 x i8>, <16 x i8> addrspace(4)* %arg3, i32 0
; GFX9-O0: buffer_store_dword v1
; GFX9: s_swappc_b64
%tmp134 = call i64 @called_i64(i64 %tmp107)
-; GFX9-O0: buffer_load_dword v3
-; GFX9-O0: buffer_load_dword v4
+; GFX9-O0: buffer_load_dword v6
+; GFX9-O0: buffer_load_dword v7
%tmp136 = add i64 %tmp134, %tmp107
%tmp137 = tail call i64 @llvm.amdgcn.wwm.i64(i64 %tmp136)
%tmp138 = bitcast i64 %tmp137 to <2 x i32>
declare void @llvm.amdgcn.raw.buffer.store.i32(i32, <4 x i32>, i32, i32, i32)
declare void @llvm.amdgcn.raw.buffer.store.v2i32(<2 x i32>, <4 x i32>, i32, i32, i32)
declare void @llvm.amdgcn.raw.buffer.store.v2f32(<2 x float>, <4 x i32>, i32, i32, i32)
-declare void @llvm.amdgcn.raw.buffer.store.v4f32(<4 x float>, <4 x i32>, i32, i32, i32)
+declare void @llvm.amdgcn.raw.buffer.store.v4f32(<4 x float>, <4 x i32>, i32, i32, i32)
declare <2 x i32> @llvm.amdgcn.s.buffer.load.v2i32(<4 x i32>, i32, i32)
declare <4 x i32> @llvm.amdgcn.s.buffer.load.v4i32(<4 x i32>, i32, i32)