From: Matt Arsenault <Matthew.Arsenault@amd.com>
Date: Sat, 14 Feb 2015 04:22:00 +0000 (+0000)
Subject: R600/SI: Fix implicit vcc operand to v_div_fmas_*
X-Git-Tag: llvmorg-3.7.0-rc1~12075
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=1bc9d95047f45a014832418cf1f3edd9ff92bd12;p=platform%2Fupstream%2Fllvm.git

R600/SI: Fix implicit vcc operand to v_div_fmas_*

This should allow finally fixing the f64 fdiv implementation.

Test is disabled for VI since there seems to be a problem with one of
the buffer load instructions on it.

llvm-svn: 229236
---

diff --git a/llvm/lib/Target/R600/AMDGPUISelLowering.cpp b/llvm/lib/Target/R600/AMDGPUISelLowering.cpp
index d577d68..d96f03a 100644
--- a/llvm/lib/Target/R600/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/R600/AMDGPUISelLowering.cpp
@@ -907,10 +907,9 @@ SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   }
 
   case Intrinsic::AMDGPU_div_fmas:
-    // FIXME: Dropping bool parameter. Work is needed to support the implicit
-    // read from VCC.
     return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
-                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
+                       Op.getOperand(4));
 
   case Intrinsic::AMDGPU_div_fixup:
     return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
diff --git a/llvm/lib/Target/R600/AMDGPUInstrInfo.td b/llvm/lib/Target/R600/AMDGPUInstrInfo.td
index 0e34392..d657ad0 100644
--- a/llvm/lib/Target/R600/AMDGPUInstrInfo.td
+++ b/llvm/lib/Target/R600/AMDGPUInstrInfo.td
@@ -35,6 +35,11 @@ def AMDGPUDivScaleOp : SDTypeProfile<2, 3,
   [SDTCisFP<0>, SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisSameAs<0, 4>]
 >;
 
+// float, float, float, vcc
+def AMDGPUFmasOp : SDTypeProfile<1, 4,
+  [SDTCisFP<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisInt<4>]
+>;
+
 //===----------------------------------------------------------------------===//
 // AMDGPU DAG Nodes
 //
@@ -153,7 +158,7 @@ def AMDGPUdiv_scale : SDNode<"AMDGPUISD::DIV_SCALE", AMDGPUDivScaleOp>;
 
 // Special case divide FMA with scale and flags (src0 = Quotient,
 // src1 = Denominator, src2 = Numerator).
-def AMDGPUdiv_fmas : SDNode<"AMDGPUISD::DIV_FMAS", SDTFPTernaryOp>;
+def AMDGPUdiv_fmas : SDNode<"AMDGPUISD::DIV_FMAS", AMDGPUFmasOp>;
 
 // Single or double precision division fixup.
 // Special case divide fixup and flags(src0 = Quotient, src1 =
diff --git a/llvm/lib/Target/R600/SIInstrInfo.td b/llvm/lib/Target/R600/SIInstrInfo.td
index cb16be5..4082b57 100644
--- a/llvm/lib/Target/R600/SIInstrInfo.td
+++ b/llvm/lib/Target/R600/SIInstrInfo.td
@@ -1300,6 +1300,28 @@ multiclass VOP3Inst <vop3 op, string opName, VOPProfile P,
   P.NumSrcArgs, P.HasModifiers
 >;
 
+// Special case for v_div_fmas_{f32|f64}, since it seems to be the
+// only VOP instruction that implicitly reads VCC.
+multiclass VOP3_VCC_Inst <vop3 op, string opName,
+                          VOPProfile P,
+                          SDPatternOperator node = null_frag> : VOP3_Helper <
+  op, opName,
+  P.Outs,
+  (ins InputModsNoDefault:$src0_modifiers, P.Src0RC64:$src0,
+       InputModsNoDefault:$src1_modifiers, P.Src1RC64:$src1,
+       InputModsNoDefault:$src2_modifiers, P.Src2RC64:$src2,
+       ClampMod:$clamp,
+       omod:$omod),
+  " $dst, $src0_modifiers, $src1_modifiers, $src2_modifiers"#"$clamp"#"$omod",
+  [(set P.DstVT:$dst,
+        (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
+                                   i1:$clamp, i32:$omod)),
+              (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)),
+              (P.Src2VT (VOP3Mods P.Src2VT:$src2, i32:$src2_modifiers)),
+              (i1 VCC)))],
+  3, 1
+>;
+
 multiclass VOP3b_Helper <vop op, RegisterClass vrc, RegisterOperand arc,
                          string opName, list<dag> pattern> :
   VOP3b_3_m <
diff --git a/llvm/lib/Target/R600/SIInstructions.td b/llvm/lib/Target/R600/SIInstructions.td
index 032d6c2..0f14dc3 100644
--- a/llvm/lib/Target/R600/SIInstructions.td
+++ b/llvm/lib/Target/R600/SIInstructions.td
@@ -1744,14 +1744,27 @@ let SchedRW = [WriteDouble, WriteSALU] in {
 defm V_DIV_SCALE_F64 : VOP3b_64 <vop3<0x16e, 0x1e1>, "v_div_scale_f64", []>;
 } // let SchedRW = [WriteDouble]
 
-let isCommutable = 1 in {
-defm V_DIV_FMAS_F32 : VOP3Inst <vop3<0x16f, 0x1e2>, "v_div_fmas_f32",
+let isCommutable = 1, Uses = [VCC] in {
+
+// v_div_fmas_f32:
+//   result = src0 * src1 + src2
+//   if (vcc)
+//     result *= 2^32
+//
+defm V_DIV_FMAS_F32 : VOP3_VCC_Inst <vop3<0x16f, 0x1e2>, "v_div_fmas_f32",
   VOP_F32_F32_F32_F32, AMDGPUdiv_fmas
 >;
+
 let SchedRW = [WriteDouble] in {
-defm V_DIV_FMAS_F64 : VOP3Inst <vop3<0x170, 0x1e3>, "v_div_fmas_f64",
+// v_div_fmas_f64:
+//   result = src0 * src1 + src2
+//   if (vcc)
+//     result *= 2^64
+//
+defm V_DIV_FMAS_F64 : VOP3_VCC_Inst <vop3<0x170, 0x1e3>, "v_div_fmas_f64",
   VOP_F64_F64_F64_F64, AMDGPUdiv_fmas
 >;
+
 } // End SchedRW = [WriteDouble]
 } // End isCommutable = 1
diff --git a/llvm/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll b/llvm/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
index a15c10f..ceffd05 100644
--- a/llvm/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
+++ b/llvm/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
@@ -1,6 +1,10 @@
 ; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; XUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; FIXME: Enable for VI.
+
+declare i32 @llvm.r600.read.tidig.x() nounwind readnone
+declare void @llvm.AMDGPU.barrier.global() nounwind noduplicate
 
 declare float @llvm.AMDGPU.div.fmas.f32(float, float, float, i1) nounwind readnone
 declare double @llvm.AMDGPU.div.fmas.f64(double, double, double, i1) nounwind readnone
 
@@ -13,7 +17,8 @@ declare double @llvm.AMDGPU.div.fmas.f64(double, double, double, i1) nounwind re
 ; VI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x30
 ; GCN-DAG: v_mov_b32_e32 [[VC:v[0-9]+]], [[SC]]
 ; GCN-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
-; GCN: v_div_fmas_f32 [[RESULT:v[0-9]+]], [[SA]], [[VB]], [[VC]]
+; GCN-DAG: v_mov_b32_e32 [[VA:v[0-9]+]], [[SA]]
+; GCN: v_div_fmas_f32 [[RESULT:v[0-9]+]], [[VA]], [[VB]], [[VC]]
 ; GCN: buffer_store_dword [[RESULT]],
 ; GCN: s_endpgm
 define void @test_div_fmas_f32(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
@@ -29,3 +34,104 @@ define void @test_div_fmas_f64(double addrspace(1)* %out, double %a, double %b,
   store double %result, double addrspace(1)* %out, align 8
   ret void
 }
+
+; GCN-LABEL: {{^}}test_div_fmas_f32_cond_to_vcc:
+; SI: v_cmp_eq_i32_e64 vcc, s{{[0-9]+}}, 0
+; SI: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+define void @test_div_fmas_f32_cond_to_vcc(float addrspace(1)* %out, float %a, float %b, float %c, i32 %i) nounwind {
+  %cmp = icmp eq i32 %i, 0
+  %result = call float @llvm.AMDGPU.div.fmas.f32(float %a, float %b, float %c, i1 %cmp) nounwind readnone
+  store float %result, float addrspace(1)* %out, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_div_fmas_f32_imm_false_cond_to_vcc:
+; SI: s_mov_b64 vcc, 0
+; SI: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+define void @test_div_fmas_f32_imm_false_cond_to_vcc(float addrspace(1)* %out, float %a, float %b, float %c) nounwind {
+  %result = call float @llvm.AMDGPU.div.fmas.f32(float %a, float %b, float %c, i1 false) nounwind readnone
+  store float %result, float addrspace(1)* %out, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_div_fmas_f32_imm_true_cond_to_vcc:
+; SI: s_mov_b64 vcc, -1
+; SI: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+define void @test_div_fmas_f32_imm_true_cond_to_vcc(float addrspace(1)* %out, float %a, float %b, float %c) nounwind {
+  %result = call float @llvm.AMDGPU.div.fmas.f32(float %a, float %b, float %c, i1 true) nounwind readnone
+  store float %result, float addrspace(1)* %out, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_div_fmas_f32_logical_cond_to_vcc:
+; SI-DAG: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
+; SI-DAG: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
+; SI-DAG: buffer_load_dword [[C:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
+
+; SI-DAG: v_cmp_eq_i32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0
+; SI-DAG: v_cmp_ne_i32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 0
+; SI: s_and_b64 vcc, [[CMP0]], [[CMP1]]
+; SI: v_div_fmas_f32 {{v[0-9]+}}, [[B]], [[A]], [[C]]
+; SI: s_endpgm
+define void @test_div_fmas_f32_logical_cond_to_vcc(float addrspace(1)* %out, float addrspace(1)* %in, i32 %d) nounwind {
+  %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+  %gep.a = getelementptr float addrspace(1)* %in, i32 %tid
+  %gep.b = getelementptr float addrspace(1)* %gep.a, i32 1
+  %gep.c = getelementptr float addrspace(1)* %gep.a, i32 2
+  %gep.out = getelementptr float addrspace(1)* %out, i32 2
+
+  %a = load float addrspace(1)* %gep.a
+  %b = load float addrspace(1)* %gep.b
+  %c = load float addrspace(1)* %gep.c
+
+  %cmp0 = icmp eq i32 %tid, 0
+  %cmp1 = icmp ne i32 %d, 0
+  %and = and i1 %cmp0, %cmp1
+
+  %result = call float @llvm.AMDGPU.div.fmas.f32(float %a, float %b, float %c, i1 %and) nounwind readnone
+  store float %result, float addrspace(1)* %gep.out, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_div_fmas_f32_i1_phi_vcc:
+; SI: v_cmp_eq_i32_e64 [[CMPTID:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0
+; SI: s_and_saveexec_b64 [[CMPTID]], [[CMPTID]]
+; SI: s_xor_b64 [[CMPTID]], exec, [[CMPTID]]
+
+; SI: buffer_load_dword [[LOAD:v[0-9]+]]
+; SI: v_cmp_ne_i32_e64 [[CMPLOAD:s\[[0-9]+:[0-9]+\]]], [[LOAD]], 0
+; SI: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, [[CMPLOAD]]
+
+
+; SI: BB6_2:
+; SI: s_or_b64 exec, exec, [[CMPTID]]
+; SI: v_cmp_ne_i32_e32 vcc, 0, v0
+; SI: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+; SI: buffer_store_dword
+; SI: s_endpgm
+define void @test_div_fmas_f32_i1_phi_vcc(float addrspace(1)* %out, float addrspace(1)* %in, i32 addrspace(1)* %dummy) nounwind {
+entry:
+  %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+  %gep.out = getelementptr float addrspace(1)* %out, i32 2
+  %gep.a = getelementptr float addrspace(1)* %in, i32 %tid
+  %gep.b = getelementptr float addrspace(1)* %gep.a, i32 1
+  %gep.c = getelementptr float addrspace(1)* %gep.a, i32 2
+
+  %a = load float addrspace(1)* %gep.a
+  %b = load float addrspace(1)* %gep.b
+  %c = load float addrspace(1)* %gep.c
+
+  %cmp0 = icmp eq i32 %tid, 0
+  br i1 %cmp0, label %bb, label %exit
+
+bb:
+  %val = load i32 addrspace(1)* %dummy
+  %cmp1 = icmp ne i32 %val, 0
+  br label %exit
+
+exit:
+  %cond = phi i1 [false, %entry], [%cmp1, %bb]
+  %result = call float @llvm.AMDGPU.div.fmas.f32(float %a, float %b, float %c, i1 %cond) nounwind readnone
+  store float %result, float addrspace(1)* %gep.out, align 4
+  ret void
+}
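
For reference, the semantics described by the new v_div_fmas_{f32|f64}
comments can be modeled on the host roughly as follows. This is an
illustrative sketch only, not part of the patch, and the helper name
div_fmas_ref is made up:

  // Reference model for v_div_fmas_{f32|f64}: result = src0 * src1 + src2,
  // additionally scaled by 2^32 (f32) or 2^64 (f64) when VCC is set.
  #include <cmath>

  static float div_fmas_ref(float src0, float src1, float src2, bool vcc) {
    float result = std::fma(src0, src1, src2);
    if (vcc)
      result = std::ldexp(result, 32); // result *= 2^32
    return result;
  }

  static double div_fmas_ref(double src0, double src1, double src2, bool vcc) {
    double result = std::fma(src0, src1, src2);
    if (vcc)
      result = std::ldexp(result, 64); // result *= 2^64
    return result;
  }

The tests above exercise exactly the new plumbing: the i1 argument of
llvm.AMDGPU.div.fmas.* must end up in VCC whether it comes from a compare,
an immediate, or a phi across basic blocks.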