BUILTIN(__builtin_amdgcn_trig_preopf, "ffi", "nc")
BUILTIN(__builtin_amdgcn_rcp, "dd", "nc")
BUILTIN(__builtin_amdgcn_rcpf, "ff", "nc")
+BUILTIN(__builtin_amdgcn_sqrt, "dd", "nc")
+BUILTIN(__builtin_amdgcn_sqrtf, "ff", "nc")
BUILTIN(__builtin_amdgcn_rsq, "dd", "nc")
BUILTIN(__builtin_amdgcn_rsqf, "ff", "nc")
BUILTIN(__builtin_amdgcn_rsq_clamp, "dd", "nc")
TARGET_BUILTIN(__builtin_amdgcn_div_fixuph, "hhhh", "nc", "16-bit-insts")
TARGET_BUILTIN(__builtin_amdgcn_rcph, "hh", "nc", "16-bit-insts")
+TARGET_BUILTIN(__builtin_amdgcn_sqrth, "hh", "nc", "16-bit-insts")
TARGET_BUILTIN(__builtin_amdgcn_rsqh, "hh", "nc", "16-bit-insts")
TARGET_BUILTIN(__builtin_amdgcn_sinh, "hh", "nc", "16-bit-insts")
TARGET_BUILTIN(__builtin_amdgcn_cosh, "hh", "nc", "16-bit-insts")
case AMDGPU::BI__builtin_amdgcn_rcpf:
case AMDGPU::BI__builtin_amdgcn_rcph:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
+ case AMDGPU::BI__builtin_amdgcn_sqrt:
+ case AMDGPU::BI__builtin_amdgcn_sqrtf:
+ case AMDGPU::BI__builtin_amdgcn_sqrth:
+ return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt);
case AMDGPU::BI__builtin_amdgcn_rsq:
case AMDGPU::BI__builtin_amdgcn_rsqf:
case AMDGPU::BI__builtin_amdgcn_rsqh:
*out = __builtin_amdgcn_rcph(a);
}
+// CHECK-LABEL: @test_sqrt_f16
+// CHECK: call half @llvm.amdgcn.sqrt.f16
+// sqrth is gated on the 16-bit-insts target feature and lowers directly to
+// the llvm.amdgcn.sqrt intrinsic with a half operand.
+void test_sqrt_f16(global half* out, half a)
+{
+  *out = __builtin_amdgcn_sqrth(a);
+}
+
// CHECK-LABEL: @test_rsq_f16
// CHECK: call half @llvm.amdgcn.rsq.f16
void test_rsq_f16(global half* out, half a)
*out = __builtin_amdgcn_rcp(a);
}
+// CHECK-LABEL: @test_sqrt_f32
+// CHECK: call float @llvm.amdgcn.sqrt.f32
+// Plain BUILTIN (no target-feature gate); lowers to llvm.amdgcn.sqrt.f32.
+void test_sqrt_f32(global float* out, float a)
+{
+  *out = __builtin_amdgcn_sqrtf(a);
+}
+
+// CHECK-LABEL: @test_sqrt_f64
+// CHECK: call double @llvm.amdgcn.sqrt.f64
+// Plain BUILTIN (no target-feature gate); lowers to llvm.amdgcn.sqrt.f64.
+void test_sqrt_f64(global double* out, double a)
+{
+  *out = __builtin_amdgcn_sqrt(a);
+}
+
// CHECK-LABEL: @test_rsq_f32
// CHECK: call float @llvm.amdgcn.rsq.f32
void test_rsq_f32(global float* out, float a)
{
*out = __builtin_amdgcn_div_fixuph(a, b, c); // expected-error {{'__builtin_amdgcn_div_fixuph' needs target feature 16-bit-insts}}
*out = __builtin_amdgcn_rcph(a); // expected-error {{'__builtin_amdgcn_rcph' needs target feature 16-bit-insts}}
+ *out = __builtin_amdgcn_sqrth(a); // expected-error {{'__builtin_amdgcn_sqrth' needs target feature 16-bit-insts}}
*out = __builtin_amdgcn_rsqh(a); // expected-error {{'__builtin_amdgcn_rsqh' needs target feature 16-bit-insts}}
*out = __builtin_amdgcn_sinh(a); // expected-error {{'__builtin_amdgcn_sinh' needs target feature 16-bit-insts}}
*out = __builtin_amdgcn_cosh(a); // expected-error {{'__builtin_amdgcn_cosh' needs target feature 16-bit-insts}}
[IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
+// llvm.amdgcn.sqrt.* — square root intrinsic, overloaded over any float type
+// (llvm_anyfloat_ty); the result type matches the operand. Exposed in source
+// as __builtin_amdgcn_sqrt{,f,h}. Pure: no memory access, speculatable.
+def int_amdgcn_sqrt : Intrinsic<
+  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
+>;
+
def int_amdgcn_rsq : Intrinsic<
[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn]
>;
[(fmad node:$src0, node:$src1, node:$src2),
(AMDGPUfmad_ftz node:$src0, node:$src1, node:$src2)]
>;
+
+// Matches either the generic fsqrt DAG node or the llvm.amdgcn.sqrt
+// intrinsic, so a single instruction pattern covers both forms.
+// FIXME: fsqrt should not select directly
+def any_amdgcn_sqrt : PatFrags<(ops node:$src0),
+  [(fsqrt node:$src0), (int_amdgcn_sqrt node:$src0)]
+>;
case Intrinsic::amdgcn_log_clamp:
case Intrinsic::amdgcn_rcp:
case Intrinsic::amdgcn_rcp_legacy:
+ case Intrinsic::amdgcn_sqrt:
case Intrinsic::amdgcn_rsq:
case Intrinsic::amdgcn_rsq_legacy:
case Intrinsic::amdgcn_rsq_clamp:
defm V_RCP_F32 : VOP1Inst <"v_rcp_f32", VOP_F32_F32, AMDGPUrcp>;
defm V_RCP_IFLAG_F32 : VOP1Inst <"v_rcp_iflag_f32", VOP_F32_F32, AMDGPUrcp_iflag>;
defm V_RSQ_F32 : VOP1Inst <"v_rsq_f32", VOP_F32_F32, AMDGPUrsq>;
-defm V_SQRT_F32 : VOP1Inst <"v_sqrt_f32", VOP_F32_F32, fsqrt>;
+defm V_SQRT_F32 : VOP1Inst <"v_sqrt_f32", VOP_F32_F32, any_amdgcn_sqrt>;
} // End SchedRW = [WriteTrans32]
let SchedRW = [WriteTrans64] in {
defm V_RCP_F64 : VOP1Inst <"v_rcp_f64", VOP_F64_F64, AMDGPUrcp>;
defm V_RSQ_F64 : VOP1Inst <"v_rsq_f64", VOP_F64_F64, AMDGPUrsq>;
-defm V_SQRT_F64 : VOP1Inst <"v_sqrt_f64", VOP_F64_F64, fsqrt>;
+defm V_SQRT_F64 : VOP1Inst <"v_sqrt_f64", VOP_F64_F64, any_amdgcn_sqrt>;
} // End SchedRW = [WriteTrans64]
let SchedRW = [WriteTrans32] in {
defm V_CVT_I16_F16 : VOP1Inst <"v_cvt_i16_f16", VOP_I16_F16, fp_to_sint>;
let SchedRW = [WriteTrans32] in {
defm V_RCP_F16 : VOP1Inst <"v_rcp_f16", VOP_F16_F16, AMDGPUrcp>;
-defm V_SQRT_F16 : VOP1Inst <"v_sqrt_f16", VOP_F16_F16, fsqrt>;
+defm V_SQRT_F16 : VOP1Inst <"v_sqrt_f16", VOP_F16_F16, any_amdgcn_sqrt>;
defm V_RSQ_F16 : VOP1Inst <"v_rsq_f16", VOP_F16_F16, AMDGPUrsq>;
defm V_LOG_F16 : VOP1Inst <"v_log_f16", VOP_F16_F16, flog2>;
defm V_EXP_F16 : VOP1Inst <"v_exp_f16", VOP_F16_F16, fexp2>;
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s | FileCheck -check-prefix=GCN %s
+
+; Plain intrinsic call should select the VOP1 e32 encoding of v_sqrt_f16.
+define half @v_sqrt_f16(half %src) {
+; GCN-LABEL: v_sqrt_f16:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_sqrt_f16_e32 v0, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %sqrt = call half @llvm.amdgcn.sqrt.f16(half %src)
+  ret half %sqrt
+}
+
+; fabs on the operand should fold into the |...| source modifier (e64 form).
+define half @v_fabs_sqrt_f16(half %src) {
+; GCN-LABEL: v_fabs_sqrt_f16:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_sqrt_f16_e64 v0, |v0|
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %fabs.src = call half @llvm.fabs.f16(half %src)
+  %sqrt = call half @llvm.amdgcn.sqrt.f16(half %fabs.src)
+  ret half %sqrt
+}
+
+; fneg(fabs(x)) should fold into the combined -|...| source modifiers.
+define half @v_fneg_fabs_sqrt_f16(half %src) {
+; GCN-LABEL: v_fneg_fabs_sqrt_f16:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_sqrt_f16_e64 v0, -|v0|
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %fabs.src = call half @llvm.fabs.f16(half %src)
+  %neg.fabs.src = fneg half %fabs.src
+  %sqrt = call half @llvm.amdgcn.sqrt.f16(half %neg.fabs.src)
+  ret half %sqrt
+}
+
+declare half @llvm.amdgcn.sqrt.f16(half) #0
+declare half @llvm.fabs.f16(half) #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s | FileCheck -check-prefix=GCN %s
+
+; Plain intrinsic call should select the VOP1 e32 encoding of v_sqrt_f32.
+define float @v_sqrt_f32(float %src) {
+; GCN-LABEL: v_sqrt_f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_sqrt_f32_e32 v0, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %sqrt = call float @llvm.amdgcn.sqrt.f32(float %src)
+  ret float %sqrt
+}
+
+; fabs on the operand should fold into the |...| source modifier (e64 form).
+define float @v_fabs_sqrt_f32(float %src) {
+; GCN-LABEL: v_fabs_sqrt_f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_sqrt_f32_e64 v0, |v0|
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %fabs.src = call float @llvm.fabs.f32(float %src)
+  %sqrt = call float @llvm.amdgcn.sqrt.f32(float %fabs.src)
+  ret float %sqrt
+}
+
+; fneg(fabs(x)) should fold into the combined -|...| source modifiers.
+define float @v_fneg_fabs_sqrt_f32(float %src) {
+; GCN-LABEL: v_fneg_fabs_sqrt_f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_sqrt_f32_e64 v0, -|v0|
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %fabs.src = call float @llvm.fabs.f32(float %src)
+  %neg.fabs.src = fneg float %fabs.src
+  %sqrt = call float @llvm.amdgcn.sqrt.f32(float %neg.fabs.src)
+  ret float %sqrt
+}
+
+; f64 operand lives in a 64-bit VGPR pair; expect the e32 form on v[0:1].
+define double @v_sqrt_f64(double %src) {
+; GCN-LABEL: v_sqrt_f64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_sqrt_f64_e32 v[0:1], v[0:1]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %sqrt = call double @llvm.amdgcn.sqrt.f64(double %src)
+  ret double %sqrt
+}
+
+; fabs on the operand should fold into the |...| source modifier (e64 form).
+define double @v_fabs_sqrt_f64(double %src) {
+; GCN-LABEL: v_fabs_sqrt_f64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_sqrt_f64_e64 v[0:1], |v[0:1]|
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %fabs.src = call double @llvm.fabs.f64(double %src)
+  %sqrt = call double @llvm.amdgcn.sqrt.f64(double %fabs.src)
+  ret double %sqrt
+}
+
+; fneg(fabs(x)) should fold into the combined -|...| source modifiers.
+define double @v_fneg_fabs_sqrt_f64(double %src) {
+; GCN-LABEL: v_fneg_fabs_sqrt_f64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_sqrt_f64_e64 v[0:1], -|v[0:1]|
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %fabs.src = call double @llvm.fabs.f64(double %src)
+  %neg.fabs.src = fneg double %fabs.src
+  %sqrt = call double @llvm.amdgcn.sqrt.f64(double %neg.fabs.src)
+  ret double %sqrt
+}
+
+declare float @llvm.amdgcn.sqrt.f32(float) #0
+declare double @llvm.amdgcn.sqrt.f64(double) #0
+declare float @llvm.fabs.f32(float) #0
+declare double @llvm.fabs.f64(double) #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }