}
case Builtin::BI__builtin_flt_rounds: {
- Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds);
+ Function *F = CGM.getIntrinsic(Intrinsic::get_rounding);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F);
// RUN: %clang_cc1 -triple msp430-unknown-unknown -emit-llvm %s -o - | FileCheck %s
int test_builtin_flt_rounds() {
- // CHECK: [[V0:[%A-Za-z0-9.]+]] = call i32 @llvm.flt.rounds()
+ // CHECK: [[V0:[%A-Za-z0-9.]+]] = call i32 @llvm.get.rounding()
// CHECK-DAG: [[V1:[%A-Za-z0-9.]+]] = trunc i32 [[V0]] to i16
// CHECK-DAG: ret i16 [[V1]]
return __builtin_flt_rounds();
// CHECK: and i1
res = __builtin_flt_rounds();
- // CHECK: call i32 @llvm.flt.rounds(
+ // CHECK: call i32 @llvm.get.rounding(
}
// CHECK-LABEL: define{{.*}} void @test_float_builtin_ops
mode or state of floating point exceptions. Altering the floating point
environment requires special care. See :ref:`Floating Point Environment <floatenv>`.
-'``llvm.flt.rounds``' Intrinsic
+'``llvm.get.rounding``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
::
- declare i32 @llvm.flt.rounds()
+ declare i32 @llvm.get.rounding()
Overview:
"""""""""
-The '``llvm.flt.rounds``' intrinsic reads the current rounding mode.
+The '``llvm.get.rounding``' intrinsic reads the current rounding mode.
Semantics:
""""""""""
-The '``llvm.flt.rounds``' intrinsic returns the current rounding mode.
+The '``llvm.get.rounding``' intrinsic returns the current rounding mode.
Encoding of the returned values is the same as the result of ``FLT_ROUNDS``,
specified by C standard:
Other values may be used to represent additional rounding modes, supported by a
target. These values are target-specific.
-
'``llvm.set.rounding``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
""""""""""
The argument is the required rounding mode. Encoding of rounding mode is
-the same as used by '``llvm.flt.rounds``'.
+the same as used by '``llvm.get.rounding``'.
Semantics:
""""""""""
/// 3 Round to -inf
/// 4 Round to nearest, ties to zero
/// Result is rounding mode and chain. Input is a chain.
- /// TODO: Rename this node to GET_ROUNDING.
- FLT_ROUNDS_,
+ GET_ROUNDING,
/// Set rounding mode.
/// The first operand is a chain pointer. The second specifies the required
- /// rounding mode, encoded in the same way as used in '``FLT_ROUNDS_``'.
+ /// rounding mode, encoded in the same way as used in '``GET_ROUNDING``'.
SET_ROUNDING,
/// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
//
let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn] in {
- def int_flt_rounds : DefaultAttrsIntrinsic<[llvm_i32_ty], []>;
+ def int_get_rounding : DefaultAttrsIntrinsic<[llvm_i32_ty], []>;
def int_set_rounding : DefaultAttrsIntrinsic<[], [llvm_i32_ty]>;
}
ReplaceFPIntrinsicWithCall(CI, "copysignf", "copysign", "copysignl");
break;
}
- case Intrinsic::flt_rounds:
+ case Intrinsic::get_rounding:
// Lower to "round to the nearest"
if (!CI->getType()->isVoidTy())
CI->replaceAllUsesWith(ConstantInt::get(CI->getType(), 1));
SimpleFinishLegalizing = false;
break;
case ISD::EXTRACT_ELEMENT:
- case ISD::FLT_ROUNDS_:
+ case ISD::GET_ROUNDING:
case ISD::MERGE_VALUES:
case ISD::EH_RETURN:
case ISD::FRAME_TO_ARGS_OFFSET:
FA, Offset));
break;
}
- case ISD::FLT_ROUNDS_:
+ case ISD::GET_ROUNDING:
Results.push_back(DAG.getConstant(1, dl, Node->getValueType(0)));
Results.push_back(Node->getOperand(0));
break;
Res = PromoteIntRes_FP_TO_FP16_BF16(N);
break;
- case ISD::FLT_ROUNDS_: Res = PromoteIntRes_FLT_ROUNDS(N); break;
+ case ISD::GET_ROUNDING: Res = PromoteIntRes_GET_ROUNDING(N); break;
case ISD::AND:
case ISD::OR:
return DAG.getNode(N->getOpcode(), dl, NVT, N->getOperand(0));
}
-SDValue DAGTypeLegalizer::PromoteIntRes_FLT_ROUNDS(SDNode *N) {
+SDValue DAGTypeLegalizer::PromoteIntRes_GET_ROUNDING(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDLoc dl(N);
case ISD::CTPOP: ExpandIntRes_CTPOP(N, Lo, Hi); break;
case ISD::CTTZ_ZERO_UNDEF:
case ISD::CTTZ: ExpandIntRes_CTTZ(N, Lo, Hi); break;
- case ISD::FLT_ROUNDS_: ExpandIntRes_FLT_ROUNDS(N, Lo, Hi); break;
+ case ISD::GET_ROUNDING: ExpandIntRes_GET_ROUNDING(N, Lo, Hi); break;
case ISD::STRICT_FP_TO_SINT:
case ISD::FP_TO_SINT: ExpandIntRes_FP_TO_SINT(N, Lo, Hi); break;
case ISD::STRICT_FP_TO_UINT:
Hi = DAG.getConstant(0, dl, NVT);
}
-void DAGTypeLegalizer::ExpandIntRes_FLT_ROUNDS(SDNode *N, SDValue &Lo,
+void DAGTypeLegalizer::ExpandIntRes_GET_ROUNDING(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDLoc dl(N);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned NBitWidth = NVT.getSizeInBits();
- Lo = DAG.getNode(ISD::FLT_ROUNDS_, dl, {NVT, MVT::Other}, N->getOperand(0));
+ Lo = DAG.getNode(ISD::GET_ROUNDING, dl, {NVT, MVT::Other}, N->getOperand(0));
SDValue Chain = Lo.getValue(1);
- // The high part is the sign of Lo, as -1 is a valid value for FLT_ROUNDS
+ // The high part is the sign of Lo, as -1 is a valid value for GET_ROUNDING
Hi = DAG.getNode(ISD::SRA, dl, NVT, Lo,
DAG.getShiftAmountConstant(NBitWidth - 1, NVT, dl));
SDValue PromoteIntRes_ADDSUBSHLSAT(SDNode *N);
SDValue PromoteIntRes_MULFIX(SDNode *N);
SDValue PromoteIntRes_DIVFIX(SDNode *N);
- SDValue PromoteIntRes_FLT_ROUNDS(SDNode *N);
+ SDValue PromoteIntRes_GET_ROUNDING(SDNode *N);
SDValue PromoteIntRes_VECREDUCE(SDNode *N);
SDValue PromoteIntRes_VP_REDUCE(SDNode *N);
SDValue PromoteIntRes_ABS(SDNode *N);
void ExpandIntRes_SIGN_EXTEND_INREG (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_TRUNCATE (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_ZERO_EXTEND (SDNode *N, SDValue &Lo, SDValue &Hi);
- void ExpandIntRes_FLT_ROUNDS (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_GET_ROUNDING (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_FP_TO_SINT (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_FP_TO_UINT (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_FP_TO_XINT_SAT (SDNode *N, SDValue &Lo, SDValue &Hi);
case Intrinsic::gcread:
case Intrinsic::gcwrite:
llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
- case Intrinsic::flt_rounds:
- Res = DAG.getNode(ISD::FLT_ROUNDS_, sdl, {MVT::i32, MVT::Other}, getRoot());
+ case Intrinsic::get_rounding:
+ Res = DAG.getNode(ISD::GET_ROUNDING, sdl, {MVT::i32, MVT::Other}, getRoot());
setValue(&I, Res);
DAG.setRoot(Res.getValue(1));
return;
return "call_alloc";
// Floating point environment manipulation
- case ISD::FLT_ROUNDS_: return "flt_rounds";
+ case ISD::GET_ROUNDING: return "get_rounding";
case ISD::SET_ROUNDING: return "set_rounding";
// Bit manipulation
}
break;
}
+ case 'f':
+ if (Name.startswith("flt.rounds")) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::get_rounding);
+ return true;
+ }
+ break;
case 'i':
case 'l': {
bool IsLifetimeStart = Name.startswith("lifetime.start");
setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
- setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
+ setOperationAction(ISD::GET_ROUNDING, MVT::i32, Custom);
setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom);
return false;
}
-SDValue AArch64TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
- SelectionDAG &DAG) const {
+SDValue AArch64TargetLowering::LowerGET_ROUNDING(SDValue Op,
+ SelectionDAG &DAG) const {
// The rounding mode is in bits 23:22 of the FPSCR.
// The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
// The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3)
return LowerFP_TO_INT_SAT(Op, DAG);
case ISD::FSINCOS:
return LowerFSINCOS(Op, DAG);
- case ISD::FLT_ROUNDS_:
- return LowerFLT_ROUNDS_(Op, DAG);
+ case ISD::GET_ROUNDING:
+ return LowerGET_ROUNDING(Op, DAG);
case ISD::SET_ROUNDING:
return LowerSET_ROUNDING(Op, DAG);
case ISD::MUL:
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
// Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
// iff target supports vfp2.
setOperationAction(ISD::BITCAST, MVT::i64, Custom);
- setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
+ setOperationAction(ISD::GET_ROUNDING, MVT::i32, Custom);
setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
}
return DAG.getMergeValues(Ops, dl);
}
-SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
- SelectionDAG &DAG) const {
+SDValue ARMTargetLowering::LowerGET_ROUNDING(SDValue Op,
+ SelectionDAG &DAG) const {
// The rounding mode is in bits 23:22 of the FPSCR.
// The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
// The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3)
case ISD::TRUNCATE: return LowerTruncate(Op.getNode(), DAG, Subtarget);
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND: return LowerVectorExtend(Op.getNode(), DAG, Subtarget);
- case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
+ case ISD::GET_ROUNDING: return LowerGET_ROUNDING(Op, DAG);
case ISD::SET_ROUNDING: return LowerSET_ROUNDING(Op, DAG);
case ISD::MUL: return LowerMUL(Op, DAG);
case ISD::SDIV:
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) const;
if (Subtarget.hasSPE())
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
- setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
+ setOperationAction(ISD::GET_ROUNDING, MVT::i32, Custom);
// If we're enabling GP optimizations, use hardware square root
if (!Subtarget.hasFSQRT() &&
return FP;
}
-SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
- SelectionDAG &DAG) const {
+SDValue PPCTargetLowering::LowerGET_ROUNDING(SDValue Op,
+ SelectionDAG &DAG) const {
SDLoc dl(Op);
/*
The rounding mode is in bits 30:31 of FPSR, and has the following
10 Round to +inf
11 Round to -inf
- FLT_ROUNDS, on the other hand, expects the following:
+ GET_ROUNDING, on the other hand, expects the following:
-1 Undefined
0 Round to 0
1 Round to nearest
case ISD::STRICT_SINT_TO_FP:
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
- case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
+ case ISD::GET_ROUNDING: return LowerGET_ROUNDING(Op, DAG);
// Lower 64-bit shifts.
case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG);
SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
const SDLoc &dl) const;
SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP},
XLenVT, Legal);
- setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
+ setOperationAction(ISD::GET_ROUNDING, XLenVT, Custom);
setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
}
case ISD::MSCATTER:
case ISD::VP_SCATTER:
return lowerMaskedScatter(Op, DAG);
- case ISD::FLT_ROUNDS_:
+ case ISD::GET_ROUNDING:
return lowerGET_ROUNDING(Op, DAG);
case ISD::SET_ROUNDING:
return lowerSET_ROUNDING(Op, DAG);
if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
Results.push_back(V);
break;
- case ISD::FLT_ROUNDS_: {
+ case ISD::GET_ROUNDING: {
SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
- SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
+ SDValue Res = DAG.getNode(ISD::GET_ROUNDING, DL, VTs, N->getOperand(0));
Results.push_back(Res.getValue(0));
Results.push_back(Res.getValue(1));
break;
setOperationAction(ISD::FREM , MVT::f128 , Expand);
if (!Subtarget.useSoftFloat() && Subtarget.hasX87()) {
- setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);
+ setOperationAction(ISD::GET_ROUNDING , MVT::i32 , Custom);
setOperationAction(ISD::SET_ROUNDING , MVT::Other, Custom);
}
}
}
-SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
- SelectionDAG &DAG) const {
+SDValue X86TargetLowering::LowerGET_ROUNDING(SDValue Op,
+ SelectionDAG &DAG) const {
/*
The rounding mode is in bits 11:10 of FPSR, and has the following
settings:
10 Round to +inf
11 Round to 0
- FLT_ROUNDS, on the other hand, expects the following:
+ GET_ROUNDING, on the other hand, expects the following:
-1 Undefined
0 Round to 0
1 Round to nearest
return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
- case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
+ case ISD::GET_ROUNDING: return LowerGET_ROUNDING(Op, DAG);
case ISD::SET_ROUNDING: return LowerSET_ROUNDING(Op, DAG);
case ISD::CTLZ:
case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerWin64_FP_TO_INT128(SDValue Op, SelectionDAG &DAG,
--- /dev/null
+RUN: llvm-dis %p/Inputs/auto_upgrade_flt_rounds.bc -o - | FileCheck %s
+
+CHECK-LABEL: define signext i32 @get_rounds()
+CHECK: tail call i32 @llvm.get.rounding()
; CHECK: add w8, w8, #1024, lsl #12
; CHECK: ubfx w0, w8, #22, #2
; CHECK: ret
- %1 = tail call i32 @llvm.flt.rounds()
+ %1 = tail call i32 @llvm.get.rounding()
ret i32 %1
}
-declare i32 @llvm.flt.rounds() #0
+declare i32 @llvm.get.rounding() #0
declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata) #0
declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata) #0
declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata) #0
-declare i32 @llvm.flt.rounds()
+declare i32 @llvm.get.rounding()
declare void @llvm.set.rounding(i32)
attributes #0 = { "strictfp" }
br i1 undef, label %4, label %7
; <label>:4 ; preds = %3
- %5 = call i32 @llvm.flt.rounds()
+ %5 = call i32 @llvm.get.rounding()
%6 = icmp eq i32 %5, 1
br i1 %6, label %8, label %7
ret void
}
-declare i32 @llvm.flt.rounds() nounwind
+declare i32 @llvm.get.rounding() nounwind
define void @strtod() {
entry:
; CHECK: vmrs r{{[0-9]+}}, fpscr
- %0 = call i32 @llvm.flt.rounds()
+ %0 = call i32 @llvm.get.rounding()
%tobool = icmp ne i32 %0, 0
br i1 %tobool, label %if.then, label %if.end
declare void @llvm.arm.set.fpscr(i32)
; Function Attrs: nounwind
-declare i32 @llvm.flt.rounds()
+declare i32 @llvm.get.rounding()
bb:
%tmp = alloca %struct.wibble, align 4
%tmp1 = bitcast %struct.wibble* %tmp to i8*
- %tmp2 = tail call i32 @llvm.flt.rounds()
+ %tmp2 = tail call i32 @llvm.get.rounding()
%tmp3 = ptrtoint %struct.wibble* %tmp to i32
%tmp4 = sitofp i32 %tmp3 to double
%tmp5 = fmul double %tmp4, 0x0123456789ABCDEF
ret i32 undef
}
-declare i32 @llvm.flt.rounds()
+declare i32 @llvm.get.rounding()
declare i32 @zot(...)
define i16 @foo() {
entry:
- %0 = call i32 @llvm.flt.rounds()
+ %0 = call i32 @llvm.get.rounding()
%1 = trunc i32 %0 to i16
ret i16 %1
}
-declare i32 @llvm.flt.rounds() nounwind
+declare i32 @llvm.get.rounding() nounwind
%retval = alloca i32 ; <ptr> [#uses=2]
%tmp = alloca i32 ; <ptr> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp1 = call i32 @llvm.flt.rounds( ) ; <i32> [#uses=1]
+ %tmp1 = call i32 @llvm.get.rounding( ) ; <i32> [#uses=1]
store i32 %tmp1, ptr %tmp, align 4
%tmp2 = load i32, ptr %tmp, align 4 ; <i32> [#uses=1]
store i32 %tmp2, ptr %retval, align 4
ret i32 %retval3
}
-declare i32 @llvm.flt.rounds() nounwind
+declare i32 @llvm.get.rounding() nounwind
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
-declare i32 @llvm.flt.rounds()
+declare i32 @llvm.get.rounding()
define i32 @test_flt_rounds() nounwind {
; RV32I-LABEL: test_flt_rounds:
; RV64I: # %bb.0:
; RV64I-NEXT: li a0, 1
; RV64I-NEXT: ret
- %1 = call i32 @llvm.flt.rounds()
+ %1 = call i32 @llvm.get.rounding()
ret i32 %1
}
; RV64IF-NEXT: srl a0, a1, a0
; RV64IF-NEXT: andi a0, a0, 7
; RV64IF-NEXT: ret
- %rm = call i32 @llvm.flt.rounds()
+ %rm = call i32 @llvm.get.rounding()
ret i32 %rm
}
}
declare void @llvm.set.rounding(i32)
-declare i32 @llvm.flt.rounds()
+declare i32 @llvm.get.rounding()
; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=-sse2 -verify-machineinstrs < %s | FileCheck %s --check-prefix=X86
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -verify-machineinstrs < %s | FileCheck %s --check-prefix=X64
-declare i32 @llvm.flt.rounds()
+declare i32 @llvm.get.rounding()
define i32 @test_flt_rounds() nounwind {
; X86-LABEL: test_flt_rounds:
; X64-NEXT: shrl %cl, %eax
; X64-NEXT: andl $3, %eax
; X64-NEXT: retq
- %1 = call i32 @llvm.flt.rounds()
+ %1 = call i32 @llvm.get.rounding()
ret i32 %1
}
; X64-NEXT: retq
entry:
%call = tail call i32 @fesetround(i32 1024)
- %0 = tail call i32 @llvm.flt.rounds()
+ %0 = tail call i32 @llvm.get.rounding()
%cmp = icmp ne i32 %0, 3
%spec.select = zext i1 %cmp to i32
%call1 = tail call i32 @fesetround(i32 0)
- %1 = tail call i32 @llvm.flt.rounds()
+ %1 = tail call i32 @llvm.get.rounding()
%cmp2 = icmp eq i32 %1, 1
%inc4 = select i1 %cmp, i32 2, i32 1
%errs.1 = select i1 %cmp2, i32 %spec.select, i32 %inc4
%call6 = tail call i32 @fesetround(i32 3072)
- %2 = tail call i32 @llvm.flt.rounds()
+ %2 = tail call i32 @llvm.get.rounding()
%cmp7 = icmp ne i32 %2, 0
%inc9 = zext i1 %cmp7 to i32
%spec.select22 = add nuw nsw i32 %errs.1, %inc9
%call11 = tail call i32 @fesetround(i32 2048)
- %3 = tail call i32 @llvm.flt.rounds()
+ %3 = tail call i32 @llvm.get.rounding()
%cmp12 = icmp ne i32 %3, 2
%inc14.neg = sext i1 %cmp12 to i32
%cmp16 = icmp ne i32 %spec.select22, %inc14.neg