bool SIInstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                     MachineInstr *SecondLdSt,
                                     unsigned NumLoads) const {
-  // TODO: This needs finer tuning
-  if (NumLoads > 4)
+  const MachineOperand *FirstDst = nullptr;
+  const MachineOperand *SecondDst = nullptr;
+
+  if (isDS(*FirstLdSt) && isDS(*SecondLdSt)) {
+    FirstDst = getNamedOperand(*FirstLdSt, AMDGPU::OpName::vdst);
+    SecondDst = getNamedOperand(*SecondLdSt, AMDGPU::OpName::vdst);
+  }
+
+  if (isSMRD(*FirstLdSt) && isSMRD(*SecondLdSt)) {
+    FirstDst = getNamedOperand(*FirstLdSt, AMDGPU::OpName::sdst);
+    SecondDst = getNamedOperand(*SecondLdSt, AMDGPU::OpName::sdst);
+  }
+
+  if ((isMUBUF(*FirstLdSt) && isMUBUF(*SecondLdSt)) ||
+      (isMTBUF(*FirstLdSt) && isMTBUF(*SecondLdSt))) {
+    FirstDst = getNamedOperand(*FirstLdSt, AMDGPU::OpName::vdata);
+    SecondDst = getNamedOperand(*SecondLdSt, AMDGPU::OpName::vdata);
+  }
+
+  if (!FirstDst || !SecondDst)
    return false;
-  if (isDS(*FirstLdSt) && isDS(*SecondLdSt))
-    return true;
+  // Try to limit clustering based on the total number of bytes loaded
+  // rather than the number of instructions. This is done to help reduce
+  // register pressure. The method used is somewhat inexact, though,
+  // because it assumes that all loads in the cluster will load the
+  // same number of bytes as FirstLdSt.
-  if (isSMRD(*FirstLdSt) && isSMRD(*SecondLdSt))
-    return true;
+  // The unit of this value is bytes.
+  // FIXME: This needs finer tuning.
+  unsigned LoadClusterThreshold = 16;
+
+  const MachineRegisterInfo &MRI =
+      FirstLdSt->getParent()->getParent()->getRegInfo();
+  const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg());
-  return (isMUBUF(*FirstLdSt) || isMTBUF(*FirstLdSt)) &&
-         (isMUBUF(*SecondLdSt) || isMTBUF(*SecondLdSt));
+  return (NumLoads * DstRC->getSize()) <= LoadClusterThreshold;
}
void
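For reference, the new heuristic boils down to one multiplication against a fixed byte budget. Below is a minimal standalone sketch of that arithmetic, not LLVM code: wouldCluster and the SGPR byte sizes are hypothetical stand-ins for the MachineRegisterInfo / TargetRegisterClass::getSize() lookup the patch performs.

#include <cstdio>

// Hypothetical dst register class sizes in bytes: an s_load_dword result
// lives in a 32-bit SGPR (4 bytes); an s_load_dwordx2 result uses a
// 64-bit SGPR pair (8 bytes).
constexpr unsigned SGPR32Bytes = 4;
constexpr unsigned SGPR64Bytes = 8;

// Mirrors the patch's final check: estimate the cluster's footprint as
// NumLoads copies of the first load's dst size, and keep clustering only
// while that estimate stays within the 16-byte threshold.
static bool wouldCluster(unsigned NumLoads, unsigned FirstDstBytes) {
  const unsigned LoadClusterThreshold = 16; // bytes, as in the patch
  return NumLoads * FirstDstBytes <= LoadClusterThreshold;
}

int main() {
  std::printf("4 x s_load_dword:   %d\n", wouldCluster(4, SGPR32Bytes)); // 16 <= 16: yes
  std::printf("2 x s_load_dwordx2: %d\n", wouldCluster(2, SGPR64Bytes)); // 16 <= 16: yes
  std::printf("3rd load, dwordx2 first: %d\n", wouldCluster(3, SGPR64Bytes)); // 24 > 16: no
  return 0;
}

Four 4-byte scalar loads still fit the budget, but once the first load in a candidate cluster is a dwordx2, a third load pushes the estimate to 24 bytes. That is what splits the argument loads in the cluster_arg_loads test below.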
; FUNC-LABEL: {{^}}v_ctpop_add_sgpr_i32:
; GCN: buffer_load_dword [[VAL0:v[0-9]+]],
-; GCN-NEXT: s_waitcnt
+; GCN: s_waitcnt
; GCN-NEXT: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL0]], s{{[0-9]+}}
; GCN-NEXT: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
; We can't use an SGPR when forming madak
; GCN-LABEL: {{^}}s_v_madak_f32:
-; GCN: s_load_dword [[SB:s[0-9]+]]
+; GCN-DAG: s_load_dword [[SB:s[0-9]+]]
; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]]
; GCN-NOT: v_madak_f32
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=VI --check-prefix=GCN %s
; FUNC-LABEL: {{^}}cluster_arg_loads:
+; FIXME: Due to changes in the load clustering heuristics, we no longer
+; cluster all argument loads together.
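+; With the new 16-byte threshold, the two 8-byte dwordx2 loads already
+; fill a cluster on their own (2 * 8 == 16), so the 4-byte dword loads
+; end up in a separate cluster instead of being appended (3 * 8 == 24 > 16).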
+; SI: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xe
; SI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x9
; SI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
-; SI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
-; SI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xe
-; VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x24
+; VI: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34
; VI-NEXT: s_nop 0
-; VI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; VI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x38
; VI-NEXT: s_nop 0
-; VI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34
+; VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x24
; VI-NEXT: s_nop 0
-; VI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x38
+; VI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
define void @cluster_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %x, i32 %y) nounwind {
store i32 %x, i32 addrspace(1)* %out0, align 4
store i32 %y, i32 addrspace(1)* %out1, align 4