This function returns the same values as the libm ``round``
functions would, and handles error conditions in the same way.
+'``llvm.roundeven.*``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+This is an overloaded intrinsic. You can use ``llvm.roundeven`` on any
+floating-point or vector of floating-point type. Not all targets support
+all types however.
+
+::
+
+ declare float @llvm.roundeven.f32(float %Val)
+ declare double @llvm.roundeven.f64(double %Val)
+ declare x86_fp80 @llvm.roundeven.f80(x86_fp80 %Val)
+ declare fp128 @llvm.roundeven.f128(fp128 %Val)
+ declare ppc_fp128 @llvm.roundeven.ppcf128(ppc_fp128 %Val)
+
+Overview:
+"""""""""
+
+The '``llvm.roundeven.*``' intrinsics return the operand rounded to the nearest
+integer in floating-point format, rounding halfway cases to even (that is, to
+the nearest value that is an even integer).
+
+Arguments:
+""""""""""
+
+The argument and return value are floating-point numbers of the same type.
+
+Semantics:
+""""""""""
+
+This function implements IEEE-754 operation ``roundToIntegralTiesToEven``. It
+also behaves in the same way as C standard function ``roundeven``, except that
+it does not raise floating-point exceptions.
+
+
'``llvm.lround.*``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
would and handles error conditions in the same way.
+'``llvm.experimental.constrained.roundeven``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+::
+
+ declare <type>
+ @llvm.experimental.constrained.roundeven(<type> <op1>,
+ metadata <exception behavior>)
+
+Overview:
+"""""""""
+
+The '``llvm.experimental.constrained.roundeven``' intrinsic returns the first
+operand rounded to the nearest integer in floating-point format, rounding
+halfway cases to even (that is, to the nearest value that is an even integer),
+regardless of the current rounding direction.
+
+Arguments:
+""""""""""
+
+The first argument and the return value are floating-point numbers of the same
+type.
+
+The second argument specifies the exception behavior as described above.
+
+Semantics:
+""""""""""
+
+This function implements IEEE-754 operation ``roundToIntegralTiesToEven``. It
+also behaves in the same way as C standard function ``roundeven`` and can signal
+the invalid operation exception for an SNaN operand.
+
+
'``llvm.experimental.constrained.lround``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/// double round(double x);
TLI_DEFINE_ENUM_INTERNAL(round)
TLI_DEFINE_STRING_INTERNAL("round")
+/// double roundeven(double x);
+TLI_DEFINE_ENUM_INTERNAL(roundeven)
+TLI_DEFINE_STRING_INTERNAL("roundeven")
+/// float roundevenf(float x);
+TLI_DEFINE_ENUM_INTERNAL(roundevenf)
+TLI_DEFINE_STRING_INTERNAL("roundevenf")
+/// long double roundevenl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(roundevenl)
+TLI_DEFINE_STRING_INTERNAL("roundevenl")
/// float roundf(float x);
TLI_DEFINE_ENUM_INTERNAL(roundf)
TLI_DEFINE_STRING_INTERNAL("roundf")
case Intrinsic::round:
ISDs.push_back(ISD::FROUND);
break;
+ case Intrinsic::roundeven:
+ ISDs.push_back(ISD::FROUNDEVEN);
+ break;
case Intrinsic::pow:
ISDs.push_back(ISD::FPOW);
break;
STRICT_FCEIL,
STRICT_FFLOOR,
STRICT_FROUND,
+ STRICT_FROUNDEVEN,
STRICT_FTRUNC,
STRICT_LROUND,
STRICT_LLROUND,
FRINT,
FNEARBYINT,
FROUND,
+ FROUNDEVEN,
FFLOOR,
LROUND,
LLROUND,
DAG_FUNCTION(powi, 2, 1, experimental_constrained_powi, FPOWI)
DAG_FUNCTION(rint, 1, 1, experimental_constrained_rint, FRINT)
DAG_FUNCTION(round, 1, 0, experimental_constrained_round, FROUND)
+DAG_FUNCTION(roundeven, 1, 0, experimental_constrained_roundeven, FROUNDEVEN)
DAG_FUNCTION(sin, 1, 1, experimental_constrained_sin, FSIN)
DAG_FUNCTION(sqrt, 1, 1, experimental_constrained_sqrt, FSQRT)
DAG_FUNCTION(trunc, 1, 0, experimental_constrained_trunc, FTRUNC)
def int_rint : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
def int_nearbyint : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
def int_round : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_roundeven : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
def int_canonicalize : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>],
[IntrNoMem]>;
def int_experimental_constrained_round : Intrinsic<[ llvm_anyfloat_ty ],
[ LLVMMatchType<0>,
llvm_metadata_ty ]>;
+ def int_experimental_constrained_roundeven : Intrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_metadata_ty ]>;
def int_experimental_constrained_trunc : Intrinsic<[ llvm_anyfloat_ty ],
[ LLVMMatchType<0>,
llvm_metadata_ty ]>;
HANDLE_LIBCALL(ROUND_F80, "roundl")
HANDLE_LIBCALL(ROUND_F128, "roundl")
HANDLE_LIBCALL(ROUND_PPCF128, "roundl")
+HANDLE_LIBCALL(ROUNDEVEN_F32, "roundevenf")
+HANDLE_LIBCALL(ROUNDEVEN_F64, "roundeven")
+HANDLE_LIBCALL(ROUNDEVEN_F80, "roundevenl")
+HANDLE_LIBCALL(ROUNDEVEN_F128, "roundevenl")
+HANDLE_LIBCALL(ROUNDEVEN_PPCF128, "roundevenl")
HANDLE_LIBCALL(FLOOR_F32, "floorf")
HANDLE_LIBCALL(FLOOR_F64, "floor")
HANDLE_LIBCALL(FLOOR_F80, "floorl")
case Intrinsic::ceil:
case Intrinsic::floor:
case Intrinsic::round:
+ case Intrinsic::roundeven:
case Intrinsic::trunc:
case Intrinsic::nearbyint:
case Intrinsic::rint:
case Intrinsic::experimental_constrained_ceil:
case Intrinsic::experimental_constrained_floor:
case Intrinsic::experimental_constrained_round:
+ case Intrinsic::experimental_constrained_roundeven:
case Intrinsic::experimental_constrained_trunc:
case Intrinsic::experimental_constrained_nearbyint:
case Intrinsic::experimental_constrained_rint:
return ConstantFP::get(Ty->getContext(), U);
}
+ if (IntrinsicID == Intrinsic::roundeven) {
+ U.roundToIntegral(APFloat::rmNearestTiesToEven);
+ return ConstantFP::get(Ty->getContext(), U);
+ }
+
if (IntrinsicID == Intrinsic::ceil) {
U.roundToIntegral(APFloat::rmTowardPositive);
return ConstantFP::get(Ty->getContext(), U);
case Intrinsic::rint:
case Intrinsic::nearbyint:
case Intrinsic::round:
+ case Intrinsic::roundeven:
case Intrinsic::canonicalize:
return true;
}
case Intrinsic::trunc:
case Intrinsic::ceil:
case Intrinsic::round:
+ case Intrinsic::roundeven:
case Intrinsic::nearbyint:
case Intrinsic::rint: {
// floor (sitofp x) -> sitofp x
case LibFunc_round:
case LibFunc_roundf:
case LibFunc_roundl:
+ case LibFunc_roundeven:
+ case LibFunc_roundevenf:
+ case LibFunc_roundevenl:
case LibFunc_sin:
case LibFunc_sinf:
case LibFunc_sinh:
case LibFunc_roundf:
case LibFunc_roundl:
return Intrinsic::round;
+ case LibFunc_roundeven:
+ case LibFunc_roundevenf:
+ case LibFunc_roundevenl:
+ return Intrinsic::roundeven;
case LibFunc_pow:
case LibFunc_powf:
case LibFunc_powl:
case Intrinsic::rint:
case Intrinsic::nearbyint:
case Intrinsic::round:
+ case Intrinsic::roundeven:
return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
case Intrinsic::sqrt:
return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
case Intrinsic::rint:
case Intrinsic::nearbyint:
case Intrinsic::round:
+ case Intrinsic::roundeven:
case Intrinsic::pow:
case Intrinsic::fma:
case Intrinsic::fmuladd:
ReplaceFPIntrinsicWithCall(CI, "roundf", "round", "roundl");
break;
}
+ case Intrinsic::roundeven: {
+ ReplaceFPIntrinsicWithCall(CI, "roundevenf", "roundeven", "roundevenl");
+ break;
+ }
case Intrinsic::copysign: {
ReplaceFPIntrinsicWithCall(CI, "copysignf", "copysign", "copysignl");
break;
RTLIB::ROUND_F128,
RTLIB::ROUND_PPCF128, Results);
break;
+ case ISD::FROUNDEVEN:
+ case ISD::STRICT_FROUNDEVEN:
+ ExpandFPLibCall(Node, RTLIB::ROUNDEVEN_F32,
+ RTLIB::ROUNDEVEN_F64,
+ RTLIB::ROUNDEVEN_F80,
+ RTLIB::ROUNDEVEN_F128,
+ RTLIB::ROUNDEVEN_PPCF128, Results);
+ break;
case ISD::FPOWI:
case ISD::STRICT_FPOWI: {
RTLIB::Libcall LC;
case ISD::FRINT:
case ISD::FNEARBYINT:
case ISD::FROUND:
+ case ISD::FROUNDEVEN:
case ISD::FTRUNC:
case ISD::FNEG:
case ISD::FSQRT:
case ISD::FRINT: R = SoftenFloatRes_FRINT(N); break;
case ISD::STRICT_FROUND:
case ISD::FROUND: R = SoftenFloatRes_FROUND(N); break;
+ case ISD::STRICT_FROUNDEVEN:
+ case ISD::FROUNDEVEN: R = SoftenFloatRes_FROUNDEVEN(N); break;
case ISD::STRICT_FSIN:
case ISD::FSIN: R = SoftenFloatRes_FSIN(N); break;
case ISD::STRICT_FSQRT:
RTLIB::ROUND_PPCF128));
}
+SDValue DAGTypeLegalizer::SoftenFloatRes_FROUNDEVEN(SDNode *N) {
+ return SoftenFloatRes_Unary(N, GetFPLibCall(N->getValueType(0),
+ RTLIB::ROUNDEVEN_F32,
+ RTLIB::ROUNDEVEN_F64,
+ RTLIB::ROUNDEVEN_F80,
+ RTLIB::ROUNDEVEN_F128,
+ RTLIB::ROUNDEVEN_PPCF128));
+}
+
SDValue DAGTypeLegalizer::SoftenFloatRes_FSIN(SDNode *N) {
return SoftenFloatRes_Unary(N, GetFPLibCall(N->getValueType(0),
RTLIB::SIN_F32,
case ISD::FRINT: ExpandFloatRes_FRINT(N, Lo, Hi); break;
case ISD::STRICT_FROUND:
case ISD::FROUND: ExpandFloatRes_FROUND(N, Lo, Hi); break;
+ case ISD::STRICT_FROUNDEVEN:
+ case ISD::FROUNDEVEN: ExpandFloatRes_FROUNDEVEN(N, Lo, Hi); break;
case ISD::STRICT_FSIN:
case ISD::FSIN: ExpandFloatRes_FSIN(N, Lo, Hi); break;
case ISD::STRICT_FSQRT:
RTLIB::ROUND_PPCF128), Lo, Hi);
}
+void DAGTypeLegalizer::ExpandFloatRes_FROUNDEVEN(SDNode *N,
+ SDValue &Lo, SDValue &Hi) {
+ ExpandFloatRes_Unary(N, GetFPLibCall(N->getValueType(0),
+ RTLIB::ROUNDEVEN_F32,
+ RTLIB::ROUNDEVEN_F64,
+ RTLIB::ROUNDEVEN_F80,
+ RTLIB::ROUNDEVEN_F128,
+ RTLIB::ROUNDEVEN_PPCF128), Lo, Hi);
+}
+
void DAGTypeLegalizer::ExpandFloatRes_FSIN(SDNode *N,
SDValue &Lo, SDValue &Hi) {
ExpandFloatRes_Unary(N, GetFPLibCall(N->getValueType(0),
case ISD::FNEG:
case ISD::FRINT:
case ISD::FROUND:
+ case ISD::FROUNDEVEN:
case ISD::FSIN:
case ISD::FSQRT:
case ISD::FTRUNC:
case ISD::FREEZE:
case ISD::FRINT:
case ISD::FROUND:
+ case ISD::FROUNDEVEN:
case ISD::FSIN:
case ISD::FSQRT:
case ISD::FTRUNC:
SDValue SoftenFloatRes_FREM(SDNode *N);
SDValue SoftenFloatRes_FRINT(SDNode *N);
SDValue SoftenFloatRes_FROUND(SDNode *N);
+ SDValue SoftenFloatRes_FROUNDEVEN(SDNode *N);
SDValue SoftenFloatRes_FSIN(SDNode *N);
SDValue SoftenFloatRes_FSQRT(SDNode *N);
SDValue SoftenFloatRes_FSUB(SDNode *N);
void ExpandFloatRes_FREM (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FRINT (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FROUND (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandFloatRes_FROUNDEVEN(SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FSIN (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FSQRT (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FSUB (SDNode *N, SDValue &Lo, SDValue &Hi);
case ISD::FRINT:
case ISD::FNEARBYINT:
case ISD::FROUND:
+ case ISD::FROUNDEVEN:
case ISD::FFLOOR:
case ISD::FP_ROUND:
case ISD::FP_EXTEND:
case ISD::FP_TO_UINT:
case ISD::FRINT:
case ISD::FROUND:
+ case ISD::FROUNDEVEN:
case ISD::FSIN:
case ISD::FSQRT:
case ISD::FTRUNC:
case ISD::FP_TO_UINT:
case ISD::FRINT:
case ISD::FROUND:
+ case ISD::FROUNDEVEN:
case ISD::FSIN:
case ISD::FSQRT:
case ISD::FTRUNC:
case ISD::FNEARBYINT:
case ISD::FRINT:
case ISD::FROUND:
+ case ISD::FROUNDEVEN:
case ISD::FSIN:
case ISD::FSQRT:
case ISD::FTRUNC: {
case ISD::FFLOOR:
case ISD::FCEIL:
case ISD::FROUND:
+ case ISD::FROUNDEVEN:
case ISD::FRINT:
case ISD::FNEARBYINT: {
if (SNaN)
case Intrinsic::rint:
case Intrinsic::nearbyint:
case Intrinsic::round:
+ case Intrinsic::roundeven:
case Intrinsic::canonicalize: {
unsigned Opcode;
switch (Intrinsic) {
case Intrinsic::rint: Opcode = ISD::FRINT; break;
case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
case Intrinsic::round: Opcode = ISD::FROUND; break;
+ case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break;
case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
}
case ISD::STRICT_FNEARBYINT: return "strict_fnearbyint";
case ISD::FROUND: return "fround";
case ISD::STRICT_FROUND: return "strict_fround";
+ case ISD::FROUNDEVEN: return "froundeven";
+ case ISD::STRICT_FROUNDEVEN: return "strict_froundeven";
case ISD::FEXP: return "fexp";
case ISD::STRICT_FEXP: return "strict_fexp";
case ISD::FEXP2: return "fexp2";
// These library functions default to expand.
setOperationAction(ISD::FROUND, VT, Expand);
+ setOperationAction(ISD::FROUNDEVEN, VT, Expand);
setOperationAction(ISD::FPOWI, VT, Expand);
// These operations default to expand for vector types.
setOperationAction(ISD::FRINT, VT, Expand);
setOperationAction(ISD::FTRUNC, VT, Expand);
setOperationAction(ISD::FROUND, VT, Expand);
+ setOperationAction(ISD::FROUNDEVEN, VT, Expand);
setOperationAction(ISD::LROUND, VT, Expand);
setOperationAction(ISD::LLROUND, VT, Expand);
setOperationAction(ISD::LRINT, VT, Expand);
case Intrinsic::ceil:
case Intrinsic::floor:
case Intrinsic::round:
+ case Intrinsic::roundeven:
case Intrinsic::nearbyint:
case Intrinsic::rint:
case Intrinsic::trunc: {
case Intrinsic::nearbyint:
case Intrinsic::rint:
case Intrinsic::round:
+ case Intrinsic::roundeven:
case Intrinsic::trunc: {
Value *Src = II->getArgOperand(0);
if (!Src->hasOneUse())
return replaceUnaryCall(CI, Builder, Intrinsic::floor);
case LibFunc_round:
return replaceUnaryCall(CI, Builder, Intrinsic::round);
+ case LibFunc_roundeven:
+ return replaceUnaryCall(CI, Builder, Intrinsic::roundeven);
case LibFunc_nearbyint:
return replaceUnaryCall(CI, Builder, Intrinsic::nearbyint);
case LibFunc_rint:
--- /dev/null
+; RUN: llc < %s | FileCheck %s
+
+; This test checks default lowering of the intrinsics operating on
+; floating-point values. MSP430 is used as a target in this test because it
+; does not have native FP support, so it won't get custom lowering for these
+; intrinsics.
+;
+; REQUIRES: msp430-registered-target
+
+target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"
+target triple = "msp430---elf"
+
+
+define float @roundeven_01(float %x) {
+entry:
+ %res = call float @llvm.roundeven.f32(float %x)
+ ret float %res
+}
+; CHECK-LABEL: roundeven_01:
+; CHECK: call #roundeven
+
+declare float @llvm.roundeven.f32(float %x)
declare double @llvm.trunc.f64(double)
declare float @llvm.round.f32(float)
declare double @llvm.round.f64(double)
+declare float @llvm.roundeven.f32(float)
+declare double @llvm.roundeven.f64(double)
declare float @llvm.copysign.f32(float, float)
declare double @llvm.copysign.f64(double, double)
%trunc64 = call double @llvm.trunc.f64(double 0.000000e+00)
%round32 = call float @llvm.round.f32(float 0.000000e+00)
%round64 = call double @llvm.round.f64(double 0.000000e+00)
+ %roundeven32 = call float @llvm.roundeven.f32(float 0.000000e+00)
+ %roundeven64 = call double @llvm.roundeven.f64(double 0.000000e+00)
%copysign32 = call float @llvm.copysign.f32(float 0.000000e+00, float 0.000000e+00)
%copysign64 = call double @llvm.copysign.f64(double 0.000000e+00, double 0.000000e+00)
ret i32 0
declare double @floor(double)
declare double @ceil(double)
declare double @round(double)
+declare double @roundeven(double)
declare double @nearbyint(double)
declare double @trunc(double)
declare double @fabs(double)
declare double @llvm.round.f64(double)
declare <2 x double> @llvm.round.v2f64(<2 x double>)
+declare double @llvm.roundeven.f64(double)
+declare <2 x double> @llvm.roundeven.v2f64(<2 x double>)
+
declare double @llvm.trunc.f64(double)
declare <2 x double> @llvm.trunc.v2f64(<2 x double>)
ret float %F
}
+define float @test_shrink_libcall_roundeven(float %C) {
+; CHECK-LABEL: @test_shrink_libcall_roundeven(
+; CHECK-NEXT: [[F:%.*]] = call float @llvm.roundeven.f32(float [[C:%.*]])
+; CHECK-NEXT: ret float [[F]]
+;
+ %D = fpext float %C to double
+ ; --> roundeven
+ %E = call double @roundeven(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
define float @test_shrink_libcall_nearbyint(float %C) {
; CHECK-LABEL: @test_shrink_libcall_nearbyint(
; CHECK-NEXT: [[F:%.*]] = call float @llvm.nearbyint.f32(float [[C:%.*]])
ret float %F
}
+define float @test_shrink_intrin_roundeven(float %C) {
+; CHECK-LABEL: @test_shrink_intrin_roundeven(
+; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.roundeven.f32(float [[C:%.*]])
+; CHECK-NEXT: ret float [[TMP1]]
+;
+ %D = fpext float %C to double
+ %E = call double @llvm.roundeven.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
define float @test_shrink_intrin_trunc(float %C) {
; CHECK-LABEL: @test_shrink_intrin_trunc(
; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.trunc.f32(float [[C:%.*]])
ret <2 x float> %F
}
+define <2 x float> @test_shrink_intrin_roundeven_multi_use(<2 x float> %C) {
+; CHECK-LABEL: @test_shrink_intrin_roundeven_multi_use(
+; CHECK-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
+; CHECK-NEXT: [[E:%.*]] = call <2 x double> @llvm.roundeven.v2f64(<2 x double> [[D]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
+; CHECK-NEXT: call void @use_v2f64(<2 x double> [[D]])
+; CHECK-NEXT: call void @use_v2f64(<2 x double> [[E]])
+; CHECK-NEXT: ret <2 x float> [[F]]
+;
+ %D = fpext <2 x float> %C to <2 x double>
+ %E = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %D)
+ %F = fptrunc <2 x double> %E to <2 x float>
+ call void @use_v2f64(<2 x double> %D)
+ call void @use_v2f64(<2 x double> %E)
+ ret <2 x float> %F
+}
+
define <2 x float> @test_shrink_intrin_trunc_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_trunc_multi_use(
; CHECK-NEXT: [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
ret float %F
}
+define float @test_no_shrink_intrin_roundeven(double %D) {
+; CHECK-LABEL: @test_no_shrink_intrin_roundeven(
+; CHECK-NEXT: [[E:%.*]] = call double @llvm.roundeven.f64(double [[D:%.*]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to float
+; CHECK-NEXT: ret float [[F]]
+;
+ %E = call double @llvm.roundeven.f64(double %D)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
define float @test_no_shrink_intrin_nearbyint(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_nearbyint(
; CHECK-NEXT: [[E:%.*]] = call double @llvm.nearbyint.f64(double [[D:%.*]])
ret float %F
}
+define float @test_shrink_float_convertible_constant_intrin_roundeven() {
+; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_roundeven(
+; CHECK-NEXT: ret float 2.000000e+00
+;
+ %E = call double @llvm.roundeven.f64(double 2.1)
+ %F = fptrunc double %E to float
+ ret float %F
+}
+
define float @test_shrink_float_convertible_constant_intrin_nearbyint() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_nearbyint(
; CHECK-NEXT: ret float 2.000000e+00
ret half %F
}
+define half @test_no_shrink_mismatched_type_intrin_roundeven(double %D) {
+; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_roundeven(
+; CHECK-NEXT: [[E:%.*]] = call double @llvm.roundeven.f64(double [[D:%.*]])
+; CHECK-NEXT: [[F:%.*]] = fptrunc double [[E]] to half
+; CHECK-NEXT: ret half [[F]]
+;
+ %E = call double @llvm.roundeven.f64(double %D)
+ %F = fptrunc double %E to half
+ ret half %F
+}
+
define half @test_no_shrink_mismatched_type_intrin_nearbyint(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_nearbyint(
; CHECK-NEXT: [[E:%.*]] = call double @llvm.nearbyint.f64(double [[D:%.*]])
ret <2 x double> %E
}
+define <2 x double> @test_shrink_intrin_roundeven_fp16_vec(<2 x half> %C) {
+; CHECK-LABEL: @test_shrink_intrin_roundeven_fp16_vec(
+; CHECK-NEXT: [[TMP1:%.*]] = call <2 x half> @llvm.roundeven.v2f16(<2 x half> [[C:%.*]])
+; CHECK-NEXT: [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
+; CHECK-NEXT: ret <2 x double> [[E]]
+;
+ %D = fpext <2 x half> %C to <2 x double>
+ %E = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %D)
+ ret <2 x double> %E
+}
+
define float @test_shrink_intrin_nearbyint_fp16_src(half %C) {
; CHECK-LABEL: @test_shrink_intrin_nearbyint_fp16_src(
; CHECK-NEXT: [[TMP1:%.*]] = call half @llvm.nearbyint.f16(half [[C:%.*]])
ret i1 %cmp
}
+define i1 @test6a(float %x, float %y) {
+; CHECK-LABEL: @test6a(
+; CHECK-NEXT: [[ROUND:%.*]] = call float @llvm.roundeven.f32(float %x)
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[ROUND]], %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %x.ext = fpext float %x to double
+ %round = call double @roundeven(double %x.ext) nounwind readnone
+ %y.ext = fpext float %y to double
+ %cmp = fcmp oeq double %round, %y.ext
+ ret i1 %cmp
+}
+
+define i1 @test6a_intrin(float %x, float %y) {
+; CHECK-LABEL: @test6a_intrin(
+; CHECK-NEXT: [[ROUND:%.*]] = call float @llvm.roundeven.f32(float %x)
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[ROUND]], %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %x.ext = fpext float %x to double
+ %round = call double @llvm.roundeven.f64(double %x.ext) nounwind readnone
+ %y.ext = fpext float %y to double
+ %cmp = fcmp oeq double %round, %y.ext
+ ret i1 %cmp
+}
+
define i1 @test7(float %x, float %y) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: [[TRUNC:%.*]] = call float @llvm.trunc.f32(float %x)
ret i1 %cmp
}
+define i1 @test13a(float %x, float %y) {
+; CHECK-LABEL: @test13a(
+; CHECK-NEXT: [[ROUND:%.*]] = call float @llvm.roundeven.f32(float %x)
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[ROUND]], %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %x.ext = fpext float %x to double
+ %y.ext = fpext float %y to double
+ %round = call double @roundeven(double %x.ext) nounwind readnone
+ %cmp = fcmp oeq double %y.ext, %round
+ ret i1 %cmp
+}
+
+define i1 @test13a_intrin(float %x, float %y) {
+; CHECK-LABEL: @test13a_intrin(
+; CHECK-NEXT: [[ROUND:%.*]] = call float @llvm.roundeven.f32(float %x)
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[ROUND]], %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %x.ext = fpext float %x to double
+ %y.ext = fpext float %y to double
+ %round = call double @llvm.roundeven.f64(double %x.ext) nounwind readnone
+ %cmp = fcmp oeq double %y.ext, %round
+ ret i1 %cmp
+}
+
define i1 @test14(float %x, float %y) {
; CHECK-LABEL: @test14(
; CHECK-NEXT: [[TRUNC:%.*]] = call float @llvm.trunc.f32(float %x)
declare double @nearbyint(double) nounwind readnone
declare double @rint(double) nounwind readnone
declare double @round(double) nounwind readnone
+declare double @roundeven(double) nounwind readnone
declare double @trunc(double) nounwind readnone
declare double @fmin(double, double) nounwind readnone
declare double @fmax(double, double) nounwind readnone
declare double @llvm.floor.f64(double) nounwind readnone
declare double @llvm.nearbyint.f64(double) nounwind readnone
declare double @llvm.round.f64(double) nounwind readnone
+declare double @llvm.roundeven.f64(double) nounwind readnone
declare double @llvm.trunc.f64(double) nounwind readnone
ret i1 %tmp
}
+define i1 @roundeven_nnan_src(double %arg) {
+; CHECK-LABEL: @roundeven_nnan_src(
+; CHECK-NEXT: ret i1 false
+;
+ %nnan = fadd nnan double %arg, 1.0
+ %op = call double @llvm.roundeven.f64(double %nnan)
+ %tmp = fcmp uno double %op, %op
+ ret i1 %tmp
+}
+
define i1 @known_nan_select(i1 %cond, double %arg0, double %arg1) {
; CHECK-LABEL: @known_nan_select(
; CHECK-NEXT: ret i1 true
declare double @llvm.rint.f64(double)
declare double @llvm.nearbyint.f64(double)
declare double @llvm.round.f64(double)
+declare double @llvm.roundeven.f64(double)
ret float %round
}
+define float @uitofp_roundeven(i32 %arg) {
+; CHECK-LABEL: @uitofp_roundeven(
+; CHECK-NEXT: [[CVT:%.*]] = uitofp i32 [[ARG:%.*]] to float
+; CHECK-NEXT: ret float [[CVT]]
+;
+ %cvt = uitofp i32 %arg to float
+ %round = call float @llvm.roundeven.f32(float %cvt)
+ ret float %round
+}
+
define float @sitofp_nearbyint(i32 %arg) {
; CHECK-LABEL: @sitofp_nearbyint(
; CHECK-NEXT: [[CVT:%.*]] = sitofp i32 [[ARG:%.*]] to float
declare float @llvm.trunc.f32(float) #0
declare float @llvm.ceil.f32(float) #0
declare float @llvm.round.f32(float) #0
+declare float @llvm.roundeven.f32(float) #0
declare float @llvm.nearbyint.f32(float) #0
declare float @llvm.rint.f32(float) #0
; CHECK: call float @llvm.minnum.f32
; CHECK: call float @llvm.maxnum.f32
; CHECK: call float @llvm.powi.f32
+; CHECK: call float @llvm.roundeven.f32
; CHECK: for.body:
define void @test(float %arg1, float %arg2) {
%tmp.11 = call float @llvm.minimum.f32(float %tmp.10, float %arg2)
%tmp.12 = call float @llvm.maximum.f32(float %tmp.11, float %arg2)
%tmp.13 = call float @llvm.powi.f32(float %tmp.12, i32 4)
- call void @consume(float %tmp.13)
+ %tmp.14 = call float @llvm.roundeven.f32(float %tmp.13)
+ call void @consume(float %tmp.14)
%IND.new = add i32 %IND, 1
br label %for.head
declare float @llvm.minimum.f32(float, float)
declare float @llvm.maximum.f32(float, float)
declare float @llvm.powi.f32(float, i32)
+declare float @llvm.roundeven.f32(float)
declare double @llvm.round.f64(double) nounwind readnone
+;CHECK-LABEL: @roundeven_f32(
+;CHECK: llvm.roundeven.v4f32
+;CHECK: ret void
+define void @roundeven_f32(i32 %n, float* noalias %y, float* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
+ %0 = load float, float* %arrayidx, align 4
+ %call = tail call float @llvm.roundeven.f32(float %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
+ store float %call, float* %arrayidx2, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.roundeven.f32(float) nounwind readnone
+
+;CHECK-LABEL: @roundeven_f64(
+;CHECK: llvm.roundeven.v4f64
+;CHECK: ret void
+define void @roundeven_f64(i32 %n, double* noalias %y, double* noalias %x) nounwind uwtable {
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
+ %0 = load double, double* %arrayidx, align 8
+ %call = tail call double @llvm.roundeven.f64(double %0) nounwind readnone
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
+ store double %call, double* %arrayidx2, align 8
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.roundeven.f64(double) nounwind readnone
+
;CHECK-LABEL: @fma_f32(
;CHECK: llvm.fma.v4f32
;CHECK: ret void
"declare double @round(double)\n"
"declare float @roundf(float)\n"
"declare x86_fp80 @roundl(x86_fp80)\n"
+ "declare double @roundeven(double)\n"
+ "declare float @roundevenf(float)\n"
+ "declare x86_fp80 @roundevenl(x86_fp80)\n"
"declare i32 @scanf(i8*, ...)\n"
"declare void @setbuf(%struct*, i8*)\n"
"declare i32 @setitimer(i32, %struct*, %struct*)\n"
EXPECT_EQ(II->getIntrinsicID(), Intrinsic::fma);
EXPECT_TRUE(II->hasNoInfs());
EXPECT_FALSE(II->hasNoNaNs());
+
+ Call = Builder.CreateUnaryIntrinsic(Intrinsic::roundeven, V);
+ II = cast<IntrinsicInst>(Call);
+ EXPECT_EQ(II->getIntrinsicID(), Intrinsic::roundeven);
+ EXPECT_FALSE(II->hasNoInfs());
+ EXPECT_FALSE(II->hasNoNaNs());
}
TEST_F(IRBuilderTest, IntrinsicsWithScalableVectors) {
EXPECT_FALSE(verifyModule(*M));
}
+TEST_F(IRBuilderTest, ConstrainedFPIntrinsics) {
+ IRBuilder<> Builder(BB);
+ Value *V;
+ Value *VDouble;
+ ConstrainedFPIntrinsic *CII;
+ GlobalVariable *GVDouble = new GlobalVariable(
+ *M, Type::getDoubleTy(Ctx), true, GlobalValue::ExternalLinkage, nullptr);
+ VDouble = Builder.CreateLoad(GVDouble->getValueType(), GVDouble);
+
+ Builder.setDefaultConstrainedExcept(fp::ebStrict);
+ Builder.setDefaultConstrainedRounding(RoundingMode::TowardZero);
+ Function *Fn = Intrinsic::getDeclaration(M.get(),
+ Intrinsic::experimental_constrained_roundeven, { Type::getDoubleTy(Ctx) });
+ V = Builder.CreateConstrainedFPCall(Fn, { VDouble });
+ CII = cast<ConstrainedFPIntrinsic>(V);
+ EXPECT_EQ(Intrinsic::experimental_constrained_roundeven, CII->getIntrinsicID());
+ EXPECT_EQ(fp::ebStrict, CII->getExceptionBehavior());
+}
+
TEST_F(IRBuilderTest, Lifetime) {
IRBuilder<> Builder(BB);
AllocaInst *Var1 = Builder.CreateAlloca(Builder.getInt8Ty());