From 6515c524b0ae50dd5bb052558afa8c81d3a75780 Mon Sep 17 00:00:00 2001
From: "Kevin P. Neal"
Date: Mon, 11 Nov 2019 14:21:03 -0500
Subject: [PATCH] [FPEnv] clang support for constrained FP builtins

Change the IRBuilder and clang so that constrained FP intrinsics will be
emitted for builtins when appropriate. Only non-target-specific builtins are
affected in this patch.

Differential Revision: https://reviews.llvm.org/D70256
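For illustration (a sketch, not part of the original message): with
-ffp-exception-behavior=strict, a call such as __builtin_sqrt(x) now emits the
constrained intrinsic, so the module references

    declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)

instead of @llvm.sqrt.f64. The new test below exercises the full set of
affected builtins.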
---
 clang/lib/CodeGen/CGBuiltin.cpp                | 181 ++++++++++++++++++++-----
 clang/test/CodeGen/constrained-math-builtins.c | 150 ++++++++++++++++++++
 llvm/include/llvm/IR/IRBuilder.h               |  33 +++++
 3 files changed, 330 insertions(+), 34 deletions(-)
 create mode 100644 clang/test/CodeGen/constrained-math-builtins.c

diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 2b27382..68c956a 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -349,6 +349,58 @@ static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
 }
 
 // Emit a simple mangled intrinsic that has 1 argument and a return type
+// matching the argument type. Depending on mode, this may be a constrained
+// floating-point intrinsic.
+static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
+                                const CallExpr *E, unsigned IntrinsicID,
+                                unsigned ConstrainedIntrinsicID) {
+  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+
+  if (CGF.Builder.getIsFPConstrained()) {
+    Value *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
+    return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
+  } else {
+    Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
+    return CGF.Builder.CreateCall(F, Src0);
+  }
+}
+
+// Emit an intrinsic that has 2 operands of the same type as its result.
+// Depending on mode, this may be a constrained floating-point intrinsic.
+static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
+                                const CallExpr *E, unsigned IntrinsicID,
+                                unsigned ConstrainedIntrinsicID) {
+  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
+
+  if (CGF.Builder.getIsFPConstrained()) {
+    Value *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
+    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
+  } else {
+    Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
+    return CGF.Builder.CreateCall(F, { Src0, Src1 });
+  }
+}
+
+// Emit an intrinsic that has 3 operands of the same type as its result.
+// Depending on mode, this may be a constrained floating-point intrinsic.
+static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
+                                 const CallExpr *E, unsigned IntrinsicID,
+                                 unsigned ConstrainedIntrinsicID) {
+  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
+  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
+
+  if (CGF.Builder.getIsFPConstrained()) {
+    Value *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
+    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
+  } else {
+    Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
+    return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
+  }
+}
+
+// Emit a simple mangled intrinsic that has 1 argument and a return type
 // matching the argument type.
 static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E,
@@ -394,15 +446,22 @@ static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
 }
 
 // Emit an intrinsic that has overloaded integer result and fp operand.
-static Value *emitFPToIntRoundBuiltin(CodeGenFunction &CGF,
-                                      const CallExpr *E,
-                                      unsigned IntrinsicID) {
-  llvm::Type *ResultType = CGF.ConvertType(E->getType());
-  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+static Value *
+emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
+                                        unsigned IntrinsicID,
+                                        unsigned ConstrainedIntrinsicID) {
+  llvm::Type *ResultType = CGF.ConvertType(E->getType());
+  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
 
-  Function *F = CGF.CGM.getIntrinsic(IntrinsicID,
-                                     {ResultType, Src0->getType()});
-  return CGF.Builder.CreateCall(F, Src0);
+  if (CGF.Builder.getIsFPConstrained()) {
+    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
+                                       {ResultType, Src0->getType()});
+    return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
+  } else {
+    Function *F =
+        CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
+    return CGF.Builder.CreateCall(F, Src0);
+  }
 }
 
 /// EmitFAbs - Emit a call to @llvm.fabs().
@@ -1560,7 +1619,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_ceilf:
   case Builtin::BI__builtin_ceilf16:
   case Builtin::BI__builtin_ceill:
-    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::ceil));
+    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::ceil,
+                                Intrinsic::experimental_constrained_ceil));
 
   case Builtin::BIcopysign:
   case Builtin::BIcopysignf:
@@ -1579,7 +1640,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_cosf:
   case Builtin::BI__builtin_cosf16:
   case Builtin::BI__builtin_cosl:
-    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::cos));
+    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::cos,
+                                Intrinsic::experimental_constrained_cos));
 
   case Builtin::BIexp:
   case Builtin::BIexpf:
@@ -1588,7 +1651,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_expf:
   case Builtin::BI__builtin_expf16:
   case Builtin::BI__builtin_expl:
-    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp));
+    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::exp,
+                                Intrinsic::experimental_constrained_exp));
 
   case Builtin::BIexp2:
   case Builtin::BIexp2f:
@@ -1597,7 +1662,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_exp2f:
   case Builtin::BI__builtin_exp2f16:
   case Builtin::BI__builtin_exp2l:
-    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp2));
+    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::exp2,
+                                Intrinsic::experimental_constrained_exp2));
 
   case Builtin::BIfabs:
   case Builtin::BIfabsf:
@@ -1616,7 +1683,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_floorf:
   case Builtin::BI__builtin_floorf16:
   case Builtin::BI__builtin_floorl:
-    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::floor));
+    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::floor,
+                                Intrinsic::experimental_constrained_floor));
 
   case Builtin::BIfma:
   case Builtin::BIfmaf:
@@ -1625,7 +1694,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_fmaf:
   case Builtin::BI__builtin_fmaf16:
   case Builtin::BI__builtin_fmal:
-    return RValue::get(emitTernaryBuiltin(*this, E, Intrinsic::fma));
+    return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::fma,
+                                Intrinsic::experimental_constrained_fma));
 
   case Builtin::BIfmax:
   case Builtin::BIfmaxf:
@@ -1634,7 +1705,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_fmaxf:
   case Builtin::BI__builtin_fmaxf16:
   case Builtin::BI__builtin_fmaxl:
-    return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::maxnum));
+    return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::maxnum,
+                                Intrinsic::experimental_constrained_maxnum));
 
   case Builtin::BIfmin:
   case Builtin::BIfminf:
@@ -1643,7 +1716,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_fminf:
   case Builtin::BI__builtin_fminf16:
   case Builtin::BI__builtin_fminl:
-    return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::minnum));
+    return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::minnum,
+                                Intrinsic::experimental_constrained_minnum));
 
   // fmod() is a special-case. It maps to the frem instruction rather than an
   // LLVM intrinsic.
@@ -1666,7 +1741,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_logf:
   case Builtin::BI__builtin_logf16:
   case Builtin::BI__builtin_logl:
-    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log));
+    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::log,
+                                Intrinsic::experimental_constrained_log));
 
   case Builtin::BIlog10:
   case Builtin::BIlog10f:
@@ -1675,7 +1752,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_log10f:
   case Builtin::BI__builtin_log10f16:
   case Builtin::BI__builtin_log10l:
-    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log10));
+    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::log10,
+                                Intrinsic::experimental_constrained_log10));
 
   case Builtin::BIlog2:
   case Builtin::BIlog2f:
@@ -1684,7 +1763,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_log2f:
   case Builtin::BI__builtin_log2f16:
   case Builtin::BI__builtin_log2l:
-    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log2));
+    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::log2,
+                                Intrinsic::experimental_constrained_log2));
 
   case Builtin::BInearbyint:
   case Builtin::BInearbyintf:
@@ -1692,7 +1773,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_nearbyint:
   case Builtin::BI__builtin_nearbyintf:
   case Builtin::BI__builtin_nearbyintl:
-    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::nearbyint));
+    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::nearbyint,
+                                Intrinsic::experimental_constrained_nearbyint));
 
   case Builtin::BIpow:
   case Builtin::BIpowf:
@@ -1701,7 +1784,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_powf:
   case Builtin::BI__builtin_powf16:
   case Builtin::BI__builtin_powl:
-    return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::pow));
+    return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::pow,
+                                Intrinsic::experimental_constrained_pow));
 
   case Builtin::BIrint:
   case Builtin::BIrintf:
@@ -1710,7 +1795,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_rintf:
   case Builtin::BI__builtin_rintf16:
   case Builtin::BI__builtin_rintl:
-    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::rint));
+    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::rint,
+                                Intrinsic::experimental_constrained_rint));
 
   case Builtin::BIround:
   case Builtin::BIroundf:
@@ -1719,7 +1806,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_roundf:
   case Builtin::BI__builtin_roundf16:
   case Builtin::BI__builtin_roundl:
-    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::round));
+    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::round,
+                                Intrinsic::experimental_constrained_round));
 
   case Builtin::BIsin:
   case Builtin::BIsinf:
@@ -1728,7 +1817,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_sinf:
   case Builtin::BI__builtin_sinf16:
   case Builtin::BI__builtin_sinl:
-    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::sin));
+    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::sin,
+                                Intrinsic::experimental_constrained_sin));
 
   case Builtin::BIsqrt:
   case Builtin::BIsqrtf:
@@ -1737,7 +1828,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_sqrtf:
   case Builtin::BI__builtin_sqrtf16:
   case Builtin::BI__builtin_sqrtl:
-    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::sqrt));
+    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::sqrt,
+                                Intrinsic::experimental_constrained_sqrt));
 
   case Builtin::BItrunc:
   case Builtin::BItruncf:
@@ -1746,7 +1839,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_truncf:
   case Builtin::BI__builtin_truncf16:
   case Builtin::BI__builtin_truncl:
-    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::trunc));
+    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+                                Intrinsic::trunc,
+                                Intrinsic::experimental_constrained_trunc));
 
   case Builtin::BIlround:
   case Builtin::BIlroundf:
@@ -1754,7 +1849,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_lround:
   case Builtin::BI__builtin_lroundf:
   case Builtin::BI__builtin_lroundl:
-    return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::lround));
+    return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
+        *this, E, Intrinsic::lround,
+        Intrinsic::experimental_constrained_lround));
 
   case Builtin::BIllround:
   case Builtin::BIllroundf:
@@ -1762,7 +1859,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_llround:
   case Builtin::BI__builtin_llroundf:
   case Builtin::BI__builtin_llroundl:
-    return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::llround));
+    return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
+        *this, E, Intrinsic::llround,
+        Intrinsic::experimental_constrained_llround));
 
   case Builtin::BIlrint:
   case Builtin::BIlrintf:
@@ -1770,7 +1869,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_lrint:
   case Builtin::BI__builtin_lrintf:
   case Builtin::BI__builtin_lrintl:
-    return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::lrint));
+    return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
+        *this, E, Intrinsic::lrint,
+        Intrinsic::experimental_constrained_lrint));
 
   case Builtin::BIllrint:
   case Builtin::BIllrintf:
@@ -1778,7 +1879,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_llrint:
   case Builtin::BI__builtin_llrintf:
   case Builtin::BI__builtin_llrintl:
-    return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::llrint));
+    return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
+        *this, E, Intrinsic::llrint,
+        Intrinsic::experimental_constrained_llrint));
 
   default:
     break;
@@ -2180,13 +2283,23 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
 
   case Builtin::BI__builtin_powi:
   case Builtin::BI__builtin_powif:
-  case Builtin::BI__builtin_powil: {
+  case Builtin::BI__builtin_powil:
+    return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
+        *this, E, Intrinsic::powi, Intrinsic::experimental_constrained_powi));
+#if 0
     Value *Base = EmitScalarExpr(E->getArg(0));
     Value *Exponent = EmitScalarExpr(E->getArg(1));
     llvm::Type *ArgType = Base->getType();
-    Function *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
-    return RValue::get(Builder.CreateCall(F, {Base, Exponent}));
-  }
+    // XXX Maybe
+    if (Builder.getIsFPConstrained()) {
+      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi, ArgType);
+      return RValue::get(Builder.CreateConstrainedFPCall(F, {Base, Exponent}));
+    }
+    else {
+      Function *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
+      return RValue::get(Builder.CreateCall(F, {Base, Exponent}));
+    }
+#endif
 
   case Builtin::BI__builtin_isgreater:
   case Builtin::BI__builtin_isgreaterequal:
diff --git a/clang/test/CodeGen/constrained-math-builtins.c b/clang/test/CodeGen/constrained-math-builtins.c
new file mode 100644
index 0000000..c7897ca
--- /dev/null
+++ b/clang/test/CodeGen/constrained-math-builtins.c
@@ -0,0 +1,150 @@
+// RUN: %clang_cc1 -triple x86_64-linux -ffp-exception-behavior=strict -w -S -o - -emit-llvm %s | FileCheck %s
+
+// Test codegen of constrained math builtins.
+
+void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {
+  f = __builtin_fmod(f,f); f = __builtin_fmodf(f,f); f = __builtin_fmodl(f,f);
+
+// CHECK: declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.frem.f32(float, float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.frem.f80(x86_fp80, x86_fp80, metadata, metadata)
+
+  __builtin_pow(f,f); __builtin_powf(f,f); __builtin_powl(f,f);
+
+// CHECK: declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.pow.f32(float, float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.pow.f80(x86_fp80, x86_fp80, metadata, metadata)
+
+  __builtin_powi(f,f); __builtin_powif(f,f); __builtin_powil(f,f);
+
+// CHECK: declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.powi.f80(x86_fp80, i32, metadata, metadata)
+
+  __builtin_ceil(f); __builtin_ceilf(f); __builtin_ceill(f);
+
+// CHECK: declare double @llvm.experimental.constrained.ceil.f64(double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.ceil.f32(float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.ceil.f80(x86_fp80, metadata, metadata)
+
+  __builtin_cos(f); __builtin_cosf(f); __builtin_cosl(f);
+
+// CHECK: declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.cos.f32(float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.cos.f80(x86_fp80, metadata, metadata)
+
+  __builtin_exp(f); __builtin_expf(f); __builtin_expl(f);
+
+// CHECK: declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.exp.f32(float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.exp.f80(x86_fp80, metadata, metadata)
+
+  __builtin_exp2(f); __builtin_exp2f(f); __builtin_exp2l(f);
+
+// CHECK: declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.exp2.f32(float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.exp2.f80(x86_fp80, metadata, metadata)
+
+  __builtin_floor(f); __builtin_floorf(f); __builtin_floorl(f);
+
+// CHECK: declare double @llvm.experimental.constrained.floor.f64(double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.floor.f32(float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.floor.f80(x86_fp80, metadata, metadata)
+
+  __builtin_fma(f,f,f); __builtin_fmaf(f,f,f); __builtin_fmal(f,f,f);
+
+// CHECK: declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.fma.f80(x86_fp80, x86_fp80, x86_fp80, metadata, metadata)
+
+  __builtin_fmax(f,f); __builtin_fmaxf(f,f); __builtin_fmaxl(f,f);
+
+// CHECK: declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80, x86_fp80, metadata, metadata)
+
+  __builtin_fmin(f,f); __builtin_fminf(f,f); __builtin_fminl(f,f);
+
+// CHECK: declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80, x86_fp80, metadata, metadata)
+
+  __builtin_llrint(f); __builtin_llrintf(f); __builtin_llrintl(f);
+
+// CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata)
+// CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata)
+// CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80, metadata, metadata)
+
+  __builtin_llround(f); __builtin_llroundf(f); __builtin_llroundl(f);
+
+// CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
+// CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
+// CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f80(x86_fp80, metadata)
+
+  __builtin_log(f); __builtin_logf(f); __builtin_logl(f);
+
+// CHECK: declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.log.f32(float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.log.f80(x86_fp80, metadata, metadata)
+
+  __builtin_log10(f); __builtin_log10f(f); __builtin_log10l(f);
+
+// CHECK: declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.log10.f32(float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.log10.f80(x86_fp80, metadata, metadata)
+
+  __builtin_log2(f); __builtin_log2f(f); __builtin_log2l(f);
+
+// CHECK: declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.log2.f32(float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.log2.f80(x86_fp80, metadata, metadata)
+
+  __builtin_lrint(f); __builtin_lrintf(f); __builtin_lrintl(f);
+
+// CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f64(double, metadata, metadata)
+// CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f32(float, metadata, metadata)
+// CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f80(x86_fp80, metadata, metadata)
+
+  __builtin_lround(f); __builtin_lroundf(f); __builtin_lroundl(f);
+
+// CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f64(double, metadata)
+// CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f32(float, metadata)
+// CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f80(x86_fp80, metadata)
+
+  __builtin_nearbyint(f); __builtin_nearbyintf(f); __builtin_nearbyintl(f);
+
+// CHECK: declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.nearbyint.f80(x86_fp80, metadata, metadata)
+
+  __builtin_rint(f); __builtin_rintf(f); __builtin_rintl(f);
+
+// CHECK: declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.rint.f80(x86_fp80, metadata, metadata)
+
+  __builtin_round(f); __builtin_roundf(f); __builtin_roundl(f);
+
+// CHECK: declare double @llvm.experimental.constrained.round.f64(double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.round.f32(float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.round.f80(x86_fp80, metadata, metadata)
+
+  __builtin_sin(f); __builtin_sinf(f); __builtin_sinl(f);
+
+// CHECK: declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.sin.f32(float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.sin.f80(x86_fp80, metadata, metadata)
+
+  __builtin_sqrt(f); __builtin_sqrtf(f); __builtin_sqrtl(f);
+
+// CHECK: declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.sqrt.f80(x86_fp80, metadata, metadata)
+
+  __builtin_trunc(f); __builtin_truncf(f); __builtin_truncl(f);
+
+// CHECK: declare double @llvm.experimental.constrained.trunc.f64(double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.trunc.f32(float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80, metadata, metadata)
+};
+
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index 01d3535..b7c0af1 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -2087,6 +2087,8 @@ public:
     case Intrinsic::experimental_constrained_fpext:
     case Intrinsic::experimental_constrained_fptoui:
     case Intrinsic::experimental_constrained_fptosi:
+    case Intrinsic::experimental_constrained_lround:
+    case Intrinsic::experimental_constrained_llround:
       C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
                           Name);
       break;
@@ -2297,6 +2299,37 @@ public:
                       Args, OpBundles, Name, FPMathTag);
   }
 
+  // Deprecated [opaque pointer types]
+  CallInst *CreateConstrainedFPCall(
+      Value *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
+      Optional<fp::RoundingMode> Rounding = None,
+      Optional<fp::ExceptionBehavior> Except = None) {
+    llvm::SmallVector<Value *, 6> UseArgs;
+
+    for (auto *OneArg : Args)
+      UseArgs.push_back(OneArg);
+    Function *F = cast<Function>(Callee);
+    switch (F->getIntrinsicID()) {
+    default:
+      UseArgs.push_back(getConstrainedFPRounding(Rounding));
+      break;
+    case Intrinsic::experimental_constrained_fpext:
+    case Intrinsic::experimental_constrained_fptoui:
+    case Intrinsic::experimental_constrained_fptosi:
+    case Intrinsic::experimental_constrained_lround:
+    case Intrinsic::experimental_constrained_llround:
+      // No rounding metadata for these intrinsics.
+      break;
+    }
+    UseArgs.push_back(getConstrainedFPExcept(Except));
+
+    CallInst *C = CreateCall(
+        cast<FunctionType>(Callee->getType()->getPointerElementType()), Callee,
+        UseArgs, Name);
+    setConstrainedFPCallAttr(C);
+    return C;
+  }
+
   Value *CreateSelect(Value *C, Value *True, Value *False,
                       const Twine &Name = "", Instruction *MDFrom = nullptr) {
     if (auto *CC = dyn_cast<ConstantInt>(C))
-- 
2.7.4
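Usage sketch (editorial illustration, not part of the patch): emitting a
constrained call directly through the new IRBuilder::CreateConstrainedFPCall.
The defaulted rounding/exception arguments are used, so the builder's
constrained-FP defaults supply the metadata operands; the helper name and
surrounding setup below are assumptions.

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Intrinsics.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  // Builds a call to @llvm.experimental.constrained.sqrt mangled on X's type.
  static Value *emitConstrainedSqrt(Module &M, IRBuilder<> &Builder, Value *X) {
    Builder.setIsFPConstrained(true); // callers test getIsFPConstrained()
    Function *Sqrt = Intrinsic::getDeclaration(
        &M, Intrinsic::experimental_constrained_sqrt, X->getType());
    // Appends rounding/exception metadata and sets the strictfp call attribute.
    return Builder.CreateConstrainedFPCall(Sqrt, {X});
  }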