SDTFPToIntOp, [SDNPHasChain]>;
def strict_fp_to_uint : SDNode<"ISD::STRICT_FP_TO_UINT",
SDTFPToIntOp, [SDNPHasChain]>;
+// Strict int-to-fp conversion nodes. They carry a chain (SDNPHasChain) so
+// that their FP-exception side effects stay ordered with other strict ops.
+def strict_sint_to_fp : SDNode<"ISD::STRICT_SINT_TO_FP",
+ SDTIntToFPOp, [SDNPHasChain]>;
+def strict_uint_to_fp : SDNode<"ISD::STRICT_UINT_TO_FP",
+ SDTIntToFPOp, [SDNPHasChain]>;
def setcc : SDNode<"ISD::SETCC" , SDTSetCC>;
def select : SDNode<"ISD::SELECT" , SDTSelect>;
def any_fp_to_uint : PatFrags<(ops node:$src),
[(strict_fp_to_uint node:$src),
(fp_to_uint node:$src)]>;
+// Fragments that match either the strict or the non-strict conversion, so a
+// single instruction pattern can select both forms.
+def any_sint_to_fp : PatFrags<(ops node:$src),
+ [(strict_sint_to_fp node:$src),
+ (sint_to_fp node:$src)]>;
+def any_uint_to_fp : PatFrags<(ops node:$src),
+ [(strict_uint_to_fp node:$src),
+ (uint_to_fp node:$src)]>;
multiclass binary_atomic_op_ord<SDNode atomic_op> {
def #NAME#_monotonic : PatFrag<(ops node:$ptr, node:$val),
setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Legal);
if (Subtarget.hasFPExtension())
setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Legal);
+
+ // And similarly for STRICT_[SU]INT_TO_FP.
+ setOperationAction(ISD::STRICT_SINT_TO_FP, VT, Legal);
+ if (Subtarget.hasFPExtension())
+ setOperationAction(ISD::STRICT_UINT_TO_FP, VT, Legal);
}
}
if (!Subtarget.hasFPExtension()) {
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
+ // The strict forms use the same Promote/Expand strategy as the
+ // non-strict ones above.
+ setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Promote);
+ setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
}
// We have native support for a 64-bit CTLZ, via FLOGR.
setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f64, Legal);
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f64, Legal);
+ setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
+ setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f64, Legal);
+ setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
+ setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f64, Legal);
}
if (Subtarget.hasVectorEnhancements2()) {
setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f32, Legal);
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f32, Legal);
+ // The strict int-to-fp conversions are likewise legal on these
+ // vector types.
+ setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
+ setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4f32, Legal);
+ setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
+ setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4f32, Legal);
}
// Handle floating-point types.
// Convert a signed integer register value to a floating-point one.
let Uses = [FPC], mayRaiseFPException = 1 in {
+// any_sint_to_fp matches both SINT_TO_FP and STRICT_SINT_TO_FP, so these
+// definitions now also select the constrained conversions.
- def CEFBR : UnaryRRE<"cefbr", 0xB394, sint_to_fp, FP32, GR32>;
- def CDFBR : UnaryRRE<"cdfbr", 0xB395, sint_to_fp, FP64, GR32>;
- def CXFBR : UnaryRRE<"cxfbr", 0xB396, sint_to_fp, FP128, GR32>;
+ def CEFBR : UnaryRRE<"cefbr", 0xB394, any_sint_to_fp, FP32, GR32>;
+ def CDFBR : UnaryRRE<"cdfbr", 0xB395, any_sint_to_fp, FP64, GR32>;
+ def CXFBR : UnaryRRE<"cxfbr", 0xB396, any_sint_to_fp, FP128, GR32>;
- def CEGBR : UnaryRRE<"cegbr", 0xB3A4, sint_to_fp, FP32, GR64>;
- def CDGBR : UnaryRRE<"cdgbr", 0xB3A5, sint_to_fp, FP64, GR64>;
- def CXGBR : UnaryRRE<"cxgbr", 0xB3A6, sint_to_fp, FP128, GR64>;
+ def CEGBR : UnaryRRE<"cegbr", 0xB3A4, any_sint_to_fp, FP32, GR64>;
+ def CDGBR : UnaryRRE<"cdgbr", 0xB3A5, any_sint_to_fp, FP64, GR64>;
+ def CXGBR : UnaryRRE<"cxgbr", 0xB3A6, any_sint_to_fp, FP128, GR64>;
}
// The FP extension feature provides versions of the above that allow
def CXLGBR : TernaryRRFe<"cxlgbr", 0xB3A2, FP128, GR64>;
}
+ // any_uint_to_fp covers both UINT_TO_FP and STRICT_UINT_TO_FP.
- def : Pat<(f32 (uint_to_fp GR32:$src)), (CELFBR 0, GR32:$src, 0)>;
- def : Pat<(f64 (uint_to_fp GR32:$src)), (CDLFBR 0, GR32:$src, 0)>;
- def : Pat<(f128 (uint_to_fp GR32:$src)), (CXLFBR 0, GR32:$src, 0)>;
+ def : Pat<(f32 (any_uint_to_fp GR32:$src)), (CELFBR 0, GR32:$src, 0)>;
+ def : Pat<(f64 (any_uint_to_fp GR32:$src)), (CDLFBR 0, GR32:$src, 0)>;
+ def : Pat<(f128 (any_uint_to_fp GR32:$src)), (CXLFBR 0, GR32:$src, 0)>;
- def : Pat<(f32 (uint_to_fp GR64:$src)), (CELGBR 0, GR64:$src, 0)>;
- def : Pat<(f64 (uint_to_fp GR64:$src)), (CDLGBR 0, GR64:$src, 0)>;
- def : Pat<(f128 (uint_to_fp GR64:$src)), (CXLGBR 0, GR64:$src, 0)>;
+ def : Pat<(f32 (any_uint_to_fp GR64:$src)), (CELGBR 0, GR64:$src, 0)>;
+ def : Pat<(f64 (any_uint_to_fp GR64:$src)), (CDLGBR 0, GR64:$src, 0)>;
+ def : Pat<(f128 (any_uint_to_fp GR64:$src)), (CXLGBR 0, GR64:$src, 0)>;
}
// Convert a floating-point register value to a signed integer value,
def VCDGB : TernaryVRRa<"vcdgb", 0xE7C3, null_frag, v128db, v128g, 3, 0>;
def WCDGB : TernaryVRRa<"wcdgb", 0xE7C3, null_frag, v64db, v64g, 3, 8>;
}
+ // Using any_sint_to_fp here lets the same conversion also select the
+ // strict (constrained) form.
- def : FPConversion<VCDGB, sint_to_fp, v128db, v128g, 0, 0>;
+ def : FPConversion<VCDGB, any_sint_to_fp, v128db, v128g, 0, 0>;
let Predicates = [FeatureVectorEnhancements2] in {
let Uses = [FPC], mayRaiseFPException = 1 in {
let isAsmParserOnly = 1 in
def VCEFB : TernaryVRRa<"vcefb", 0xE7C3, null_frag, v128sb, v128g, 2, 0>;
def WCEFB : TernaryVRRa<"wcefb", 0xE7C3, null_frag, v32sb, v32f, 2, 8>;
}
- def : FPConversion<VCEFB, sint_to_fp, v128sb, v128f, 0, 0>;
+ def : FPConversion<VCEFB, any_sint_to_fp, v128sb, v128f, 0, 0>;
}
// Convert from logical.
def VCDLGB : TernaryVRRa<"vcdlgb", 0xE7C1, null_frag, v128db, v128g, 3, 0>;
def WCDLGB : TernaryVRRa<"wcdlgb", 0xE7C1, null_frag, v64db, v64g, 3, 8>;
}
+ // As with the signed case, any_uint_to_fp also matches the strict form.
- def : FPConversion<VCDLGB, uint_to_fp, v128db, v128g, 0, 0>;
+ def : FPConversion<VCDLGB, any_uint_to_fp, v128db, v128g, 0, 0>;
let Predicates = [FeatureVectorEnhancements2] in {
let Uses = [FPC], mayRaiseFPException = 1 in {
let isAsmParserOnly = 1 in
def VCELFB : TernaryVRRa<"vcelfb", 0xE7C1, null_frag, v128sb, v128g, 2, 0>;
def WCELFB : TernaryVRRa<"wcelfb", 0xE7C1, null_frag, v32sb, v32f, 2, 8>;
}
- def : FPConversion<VCELFB, uint_to_fp, v128sb, v128f, 0, 0>;
+ def : FPConversion<VCELFB, any_uint_to_fp, v128sb, v128f, 0, 0>;
}
// Convert to fixed.
--- /dev/null
+; Test strict conversions of signed i32s to floating-point values.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
+declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.sitofp.f128.i32(i32, metadata, metadata)
+
+; Check i32->f32.
+define float @f1(i32 %i) #0 {
+; CHECK-LABEL: f1:
+; CHECK: cefbr %f0, %r2
+; CHECK: br %r14
+ %conv = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret float %conv
+}
+
+; Check i32->f64.
+define double @f2(i32 %i) #0 {
+; CHECK-LABEL: f2:
+; CHECK: cdfbr %f0, %r2
+; CHECK: br %r14
+ %conv = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret double %conv
+}
+
+; Check i32->f128.
+; The f128 result comes back in the %f0/%f2 register pair, hence two stores.
+define void @f3(i32 %i, fp128 *%dst) #0 {
+; CHECK-LABEL: f3:
+; CHECK: cxfbr %f0, %r2
+; CHECK: std %f0, 0(%r3)
+; CHECK: std %f2, 8(%r3)
+; CHECK: br %r14
+ %conv = call fp128 @llvm.experimental.constrained.sitofp.f128.i32(i32 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+attributes #0 = { strictfp }
--- /dev/null
+; Test strict conversions of unsigned i32s to floating-point values (z10 only).
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
+
+declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
+declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32, metadata, metadata)
+
+; Check i32->f32. There is no native instruction, so we must promote
+; to i64 first.
+define float @f1(i32 %i) #0 {
+; CHECK-LABEL: f1:
+; CHECK: llgfr [[REGISTER:%r[0-5]]], %r2
+; CHECK: cegbr %f0, [[REGISTER]]
+; CHECK: br %r14
+ %conv = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret float %conv
+}
+
+; Check i32->f64.
+; As above: zero-extend (llgfr) and then convert as a signed i64.
+define double @f2(i32 %i) #0 {
+; CHECK-LABEL: f2:
+; CHECK: llgfr [[REGISTER:%r[0-5]]], %r2
+; CHECK: cdgbr %f0, [[REGISTER]]
+; CHECK: br %r14
+ %conv = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret double %conv
+}
+
+; Check i32->f128.
+define void @f3(i32 %i, fp128 *%dst) #0 {
+; CHECK-LABEL: f3:
+; CHECK: llgfr [[REGISTER:%r[0-5]]], %r2
+; CHECK: cxgbr %f0, [[REGISTER]]
+; CHECK: std %f0, 0(%r3)
+; CHECK: std %f2, 8(%r3)
+; CHECK: br %r14
+ %conv = call fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+attributes #0 = { strictfp }
--- /dev/null
+; Test strict conversions of signed i64s to floating-point values.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
+declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.sitofp.f128.i64(i64, metadata, metadata)
+
+; Test i64->f32.
+define float @f1(i64 %i) #0 {
+; CHECK-LABEL: f1:
+; CHECK: cegbr %f0, %r2
+; CHECK: br %r14
+ %conv = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret float %conv
+}
+
+; Test i64->f64.
+define double @f2(i64 %i) #0 {
+; CHECK-LABEL: f2:
+; CHECK: cdgbr %f0, %r2
+; CHECK: br %r14
+ %conv = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret double %conv
+}
+
+; Test i64->f128.
+; The f128 result comes back in the %f0/%f2 register pair, hence two stores.
+define void @f3(i64 %i, fp128 *%dst) #0 {
+; CHECK-LABEL: f3:
+; CHECK: cxgbr %f0, %r2
+; CHECK: std %f0, 0(%r3)
+; CHECK: std %f2, 8(%r3)
+; CHECK: br %r14
+ %conv = call fp128 @llvm.experimental.constrained.sitofp.f128.i64(i64 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+attributes #0 = { strictfp }
--- /dev/null
+; Test strict conversions of unsigned i64s to floating-point values (z10 only).
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
+
+declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
+declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64, metadata, metadata)
+
+; Test i64->f32. There's no native support for unsigned i64-to-fp conversions,
+; but we should be able to implement them using signed i64-to-fp conversions.
+; We only check for the conversion and the adjusting FP add, not their
+; operands.
+define float @f1(i64 %i) #0 {
+; CHECK-LABEL: f1:
+; CHECK: cegbr
+; CHECK: aebr
+; CHECK: br %r14
+ %conv = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret float %conv
+}
+
+; Test i64->f64.
+define double @f2(i64 %i) #0 {
+; CHECK-LABEL: f2:
+; CHECK: ldgr
+; CHECK: adbr
+; CHECK: br %r14
+ %conv = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret double %conv
+}
+
+; Test i64->f128.
+define void @f3(i64 %i, fp128 *%dst) #0 {
+; CHECK-LABEL: f3:
+; CHECK: cxgbr
+; CHECK: axbr
+; CHECK: br %r14
+ %conv = call fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+attributes #0 = { strictfp }
--- /dev/null
+; Test strict conversions of unsigned integers to floating-point values
+; (z196 and above).
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
+declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32, metadata, metadata)
+
+declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
+declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64, metadata, metadata)
+
+; These use the z196 logical-conversion instructions directly on the
+; unsigned value, with no extension step.
+
+; Check i32->f32.
+define float @f1(i32 %i) #0 {
+; CHECK-LABEL: f1:
+; CHECK: celfbr %f0, 0, %r2, 0
+; CHECK: br %r14
+ %conv = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret float %conv
+}
+
+; Check i32->f64.
+define double @f2(i32 %i) #0 {
+; CHECK-LABEL: f2:
+; CHECK: cdlfbr %f0, 0, %r2, 0
+; CHECK: br %r14
+ %conv = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret double %conv
+}
+
+; Check i32->f128.
+define void @f3(i32 %i, fp128 *%dst) #0 {
+; CHECK-LABEL: f3:
+; CHECK: cxlfbr %f0, 0, %r2, 0
+; CHECK-DAG: std %f0, 0(%r3)
+; CHECK-DAG: std %f2, 8(%r3)
+; CHECK: br %r14
+ %conv = call fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+; Check i64->f32.
+define float @f4(i64 %i) #0 {
+; CHECK-LABEL: f4:
+; CHECK: celgbr %f0, 0, %r2, 0
+; CHECK: br %r14
+ %conv = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret float %conv
+}
+
+; Check i64->f64.
+define double @f5(i64 %i) #0 {
+; CHECK-LABEL: f5:
+; CHECK: cdlgbr %f0, 0, %r2, 0
+; CHECK: br %r14
+ %conv = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret double %conv
+}
+
+; Check i64->f128.
+define void @f6(i64 %i, fp128 *%dst) #0 {
+; CHECK-LABEL: f6:
+; CHECK: cxlgbr %f0, 0, %r2, 0
+; CHECK-DAG: std %f0, 0(%r3)
+; CHECK-DAG: std %f2, 8(%r3)
+; CHECK: br %r14
+ %conv = call fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+attributes #0 = { strictfp }
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
-; FIXME: llvm.experimental.constrained.[su]itofp does not yet exist
+declare fp128 @llvm.experimental.constrained.sitofp.f128.i32(i32, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.sitofp.f128.i64(i64, metadata, metadata)
+
+declare fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64, metadata, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128, metadata)
+; In each test below, the conversion produces its result in the %f0/%f2
+; pair; vmrhg merges the two halves into one vector register for the store.
+
+; Test signed i32->f128.
+define void @f1(i32 %i, fp128 *%dst) #0 {
+; CHECK-LABEL: f1:
+; CHECK: cxfbr %f0, %r2
+; CHECK: vmrhg %v0, %v0, %v2
+; CHECK: vst %v0, 0(%r3)
+; CHECK: br %r14
+ %conv = call fp128 @llvm.experimental.constrained.sitofp.f128.i32(i32 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+; Test signed i64->f128.
+define void @f2(i64 %i, fp128 *%dst) #0 {
+; CHECK-LABEL: f2:
+; CHECK: cxgbr %f0, %r2
+; CHECK: vmrhg %v0, %v0, %v2
+; CHECK: vst %v0, 0(%r3)
+; CHECK: br %r14
+ %conv = call fp128 @llvm.experimental.constrained.sitofp.f128.i64(i64 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+; Test unsigned i32->f128.
+define void @f3(i32 %i, fp128 *%dst) #0 {
+; CHECK-LABEL: f3:
+; CHECK: cxlfbr %f0, 0, %r2, 0
+; CHECK: vmrhg %v0, %v0, %v2
+; CHECK: vst %v0, 0(%r3)
+; CHECK: br %r14
+ %conv = call fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
+; Test unsigned i64->f128.
+define void @f4(i64 %i, fp128 *%dst) #0 {
+; CHECK-LABEL: f4:
+; CHECK: cxlgbr %f0, 0, %r2, 0
+; CHECK: vmrhg %v0, %v0, %v2
+; CHECK: vst %v0, 0(%r3)
+; CHECK: br %r14
+ %conv = call fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64 %i,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
+
; Test signed f128->i32.
define i32 @f5(fp128 *%src) #0 {
; CHECK-LABEL: f5:
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
-; FIXME: llvm.experimental.constrained.[su]itofp does not yet exist
-
declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double>, metadata)
declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double>, metadata)
+declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double>, metadata)
declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double>, metadata)
+declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(<2 x float>, metadata)
declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f32(<2 x float>, metadata)
+declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
+declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
; Test conversion of f64s to signed i64s.
define <2 x i64> @f1(<2 x double> %doubles) #0 {
ret <2 x i64> %dwords
}
+; Test conversion of signed i64s to f64s (vcdgb: signed i64 -> f64).
+define <2 x double> @f3(<2 x i64> %dwords) #0 {
+; CHECK-LABEL: f3:
+; CHECK: vcdgb %v24, %v24, 0, 0
+; CHECK: br %r14
+ %doubles = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %dwords,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <2 x double> %doubles
+}
+
+; Test conversion of unsigned i64s to f64s (vcdlgb: logical i64 -> f64).
+define <2 x double> @f4(<2 x i64> %dwords) #0 {
+; CHECK-LABEL: f4:
+; CHECK: vcdlgb %v24, %v24, 0, 0
+; CHECK: br %r14
+ %doubles = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %dwords,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <2 x double> %doubles
+}
+
; Test conversion of f64s to signed i32s, which must compile.
define void @f5(<2 x double> %doubles, <2 x i32> *%ptr) #0 {
%words = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double> %doubles,
ret void
}
+; Test conversion of signed i32s to f64s, which must compile.
+; No CHECK lines: we only test that instruction selection succeeds.
+define <2 x double> @f7(<2 x i32> *%ptr) #0 {
+ %words = load <2 x i32>, <2 x i32> *%ptr
+ %doubles = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32> %words,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <2 x double> %doubles
+}
+
+; Test conversion of unsigned i32s to f64s, which must compile.
+define <2 x double> @f8(<2 x i32> *%ptr) #0 {
+ %words = load <2 x i32>, <2 x i32> *%ptr
+ %doubles = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32> %words,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <2 x double> %doubles
+}
+
; Test conversion of f32s to signed i64s, which must compile.
define <2 x i64> @f9(<2 x float> *%ptr) #0 {
%floats = load <2 x float>, <2 x float> *%ptr
ret <2 x i64> %dwords
}
+; Test conversion of signed i64s to f32, which must compile.
+; No CHECK lines: we only test that instruction selection succeeds.
+define void @f11(<2 x i64> %dwords, <2 x float> *%ptr) #0 {
+ %floats = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64> %dwords,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ store <2 x float> %floats, <2 x float> *%ptr
+ ret void
+}
+
+; Test conversion of unsigned i64s to f32, which must compile.
+define void @f12(<2 x i64> %dwords, <2 x float> *%ptr) #0 {
+ %floats = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64> %dwords,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ store <2 x float> %floats, <2 x float> *%ptr
+ ret void
+}
+
attributes #0 = { strictfp }
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z15 | FileCheck %s
-; FIXME: llvm.experimental.constrained.[su]itofp does not yet exist
-
declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float>, metadata)
declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float>, metadata)
+declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
; Test conversion of f32s to signed i32s.
define <4 x i32> @f1(<4 x float> %floats) #0 {
ret <4 x i32> %words
}
+; Test conversion of signed i32s to f32s.
+; NOTE(review): the parameter is named %dwords but holds i32 words here.
+define <4 x float> @f3(<4 x i32> %dwords) #0 {
+; CHECK-LABEL: f3:
+; CHECK: vcefb %v24, %v24, 0, 0
+; CHECK: br %r14
+ %floats = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32> %dwords,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <4 x float> %floats
+}
+
+; Test conversion of unsigned i32s to f32s.
+define <4 x float> @f4(<4 x i32> %dwords) #0 {
+; CHECK-LABEL: f4:
+; CHECK: vcelfb %v24, %v24, 0, 0
+; CHECK: br %r14
+ %floats = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32> %dwords,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <4 x float> %floats
+}
+
attributes #0 = { strictfp }