47 changes: 30 additions & 17 deletions clang/lib/CodeGen/CGBuiltin.cpp
@@ -602,19 +602,20 @@ Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,

// Emit an intrinsic that has 2 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
-static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
-                                                  const CallExpr *E, unsigned IntrinsicID,
-                                                  unsigned ConstrainedIntrinsicID) {
+static Value *emitBinaryMaybeConstrainedFPBuiltin(
+    CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID,
+    unsigned ConstrainedIntrinsicID, llvm::FastMathFlags FMF = {}) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
if (CGF.Builder.getIsFPConstrained()) {
Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
-    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
+    return CGF.Builder.CreateConstrainedFPCall(
+        F, {Src0, Src1}, "", std::nullopt, std::nullopt, &FMF);
} else {
Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
-    return CGF.Builder.CreateCall(F, { Src0, Src1 });
+    return CGF.Builder.CreateCall(F, {Src0, Src1}, "", nullptr, &FMF);
}
}
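
For context, a rough sketch of what this helper now emits for a libm-style max builtin (the IR shapes match the updated tests further down; the demo function itself is hypothetical):

```cpp
// Compiled at default settings, __builtin_fmaxf now lowers to a
// maxnum call carrying the nsz flag:
//   %max = call nsz float @llvm.maxnum.f32(float %a, float %b)
// Under strict FP semantics (e.g. #pragma STDC FENV_ACCESS ON), the
// constrained variant is emitted instead, still carrying nsz:
//   %max = call nsz float @llvm.experimental.constrained.maxnum.f32(
//              float %a, float %b, metadata !"fpexcept.strict")
float fmax_demo(float a, float b) {
  return __builtin_fmaxf(a, b);
}
```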

@@ -2828,10 +2829,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_fmaxf:
case Builtin::BI__builtin_fmaxf16:
case Builtin::BI__builtin_fmaxl:
-case Builtin::BI__builtin_fmaxf128:
-  return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
-                                                         Intrinsic::maxnum,
-                                                         Intrinsic::experimental_constrained_maxnum));
+case Builtin::BI__builtin_fmaxf128: {
+  llvm::FastMathFlags FMF = Builder.getFastMathFlags();
+  FMF.setNoSignedZeros();
+  return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
+      *this, E, Intrinsic::maxnum,
+      Intrinsic::experimental_constrained_maxnum, FMF));
+}
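
Why setting NoSignedZeros is reasonable here: IEEE 754-2008 maxNum/minNum treat +0.0 and -0.0 as equal, so which zero comes back is unspecified, and nsz simply grants the optimizer that latitude. A minimal sketch, assuming default FP options (hypothetical example, not part of the patch):

```cpp
#include <cmath>
#include <cstdio>

int main() {
  // +0.0 and -0.0 compare equal, and maxNum does not order them,
  // so a conforming implementation may return either operand here.
  double r = __builtin_fmax(+0.0, -0.0);
  // Both "0" and "-0" are valid outputs; this unspecified sign bit
  // is exactly the latitude the nsz flag encodes.
  std::printf("%g (signbit=%d)\n", r, (int)std::signbit(r));
}
```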

case Builtin::BIfmin:
case Builtin::BIfminf:
@@ -2840,10 +2844,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_fminf:
case Builtin::BI__builtin_fminf16:
case Builtin::BI__builtin_fminl:
-case Builtin::BI__builtin_fminf128:
-  return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
-                                                         Intrinsic::minnum,
-                                                         Intrinsic::experimental_constrained_minnum));
+case Builtin::BI__builtin_fminf128: {
+  llvm::FastMathFlags FMF = Builder.getFastMathFlags();
+  FMF.setNoSignedZeros();
+  return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
+      *this, E, Intrinsic::minnum,
+      Intrinsic::experimental_constrained_minnum, FMF));
+}

case Builtin::BIfmaximum_num:
case Builtin::BIfmaximum_numf:
@@ -3987,8 +3994,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Result = Builder.CreateBinaryIntrinsic(
Ty->isSignedIntegerType() ? Intrinsic::smax : Intrinsic::umax, Op0,
Op1, nullptr, "elt.max");
-} else
-  Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.max");
+} else {
+  FastMathFlags FMF;
+  FMF.setNoSignedZeros(true);
+  Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/FMF, "elt.max");
+}
return RValue::get(Result);
}
case Builtin::BI__builtin_elementwise_min: {
@@ -4002,8 +4012,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Result = Builder.CreateBinaryIntrinsic(
Ty->isSignedIntegerType() ? Intrinsic::smin : Intrinsic::umin, Op0,
Op1, nullptr, "elt.min");
-} else
-  Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.min");
+} else {
+  FastMathFlags FMF;
+  FMF.setNoSignedZeros(true);

[inline review thread]

Contributor:
I don't think this should apply in the elementwise case.

I also think it was a mistake to allow floating point in elementwise min/max

Contributor Author (@wzssyqa, Dec 8, 2025):
See: #129207

In that PR, we planned to use the same naming scheme:
__builtin_elementwise_max -> max (fmax)
__builtin_elementwise_maxnum -> maxnum

Contributor Author:
If we are planning to drop float support of __builtin_elementwise, it should be in another patchset.

Contributor:
Yes, this should be discussed separately. Feedback from library authors has been that different builtins for floats/ints are a bit of a pain.

Contributor:
But they are, in fact, different operations. And you have many choices for which FP min/max.

Given this name doesn't have the historic fmin/fmax in it, I don't think this should take the fuzzy signed-zero handling.
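
For reference, a sketch of the "many choices" mentioned above, using builtins that already exist in clang (availability varies by clang version; the intrinsic mapping stated in the comments is the assumption here):

```cpp
#include <cmath>

void fp_min_flavors(double x) { // assume x is an ordinary number
  double qnan = NAN;

  // __builtin_fmin -> llvm.minnum: a quiet NaN operand loses and the
  // numeric operand is returned; signed-zero order is unspecified.
  double a = __builtin_fmin(qnan, x); // == x

  // __builtin_elementwise_minimum -> llvm.minimum: NaN wins
  // (propagates), and -0.0 is ordered below +0.0.
  double b = __builtin_elementwise_minimum(qnan, x); // NaN

  // __builtin_fminimum_num -> llvm.minimumnum: NaN loses, but signed
  // zeros are still ordered (-0.0 < +0.0).
  double c = __builtin_fminimum_num(qnan, x); // == x

  (void)a; (void)b; (void)c;
}
```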

Contributor Author:
In fact, I am planning to add __builtin_elementwise_maximumnum after this PR is merged.

Contributor:
I agree this should not continue to use minnum/maxnum, though that change should not be part of this PR.

minnum/maxnum behavior has never been consistent across targets (or across scenarios). But we could also add __builtin_elementwise_maximumnum/__builtin_elementwise_minimumnum.

Contributor Author:
> I agree this should not continue to use minnum/maxnum, though that change should not be part of this PR.

So can we merge this PR?

> minnum/maxnum behavior has never been consistent across targets (or across scenarios).

In fact, all of the architectures that claim to implement IEEE 754-2008 have the same behavior:
AArch64, MIPSr6, LoongArch, PowerPC/VSX.
That's why I plan to define minnum/maxnum to match these architectures.

> But we could also add __builtin_elementwise_maximumnum/__builtin_elementwise_minimumnum.

I will do it.
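
A quick sketch of the shared IEEE 754-2008 quiet-NaN rule being claimed here (hypothetical check; the x86 note in the comments is for contrast):

```cpp
#include <cmath>
#include <cassert>

int main() {
  // The IEEE 754-2008 minNum/maxNum rule these ISAs share: a quiet
  // NaN operand is dropped in favor of the numeric operand...
  assert(__builtin_fmax(std::nan(""), 1.0) == 1.0);
  // ...and NaN comes back only when both operands are NaN.
  assert(std::isnan(__builtin_fmax(std::nan(""), std::nan(""))));
  // By contrast, x86's scalar maxsd simply returns its second operand
  // when a NaN is involved, one source of the cross-target
  // inconsistency mentioned above.
}
```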

Collaborator:

> So can we merge this PR?

The discussion in #137567 is still unresolved.

Contributor Author:

#137567 is about all opcodes, and in fact for min/max this was already settled in #112852.

+  Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/FMF, "elt.min");
+}
return RValue::get(Result);
}

24 changes: 12 additions & 12 deletions clang/test/CodeGen/RISCV/math-builtins.c
@@ -134,22 +134,22 @@ long double truncl(long double);
// RV32-NEXT: [[TMP44:%.*]] = call fp128 @llvm.floor.f128(fp128 [[TMP43]])
// RV32-NEXT: [[TMP45:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP46:%.*]] = load float, ptr [[FARG_ADDR]], align 4
-// RV32-NEXT: [[TMP47:%.*]] = call float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
+// RV32-NEXT: [[TMP47:%.*]] = call nsz float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
// RV32-NEXT: [[TMP48:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP49:%.*]] = load double, ptr [[DARG_ADDR]], align 8
-// RV32-NEXT: [[TMP50:%.*]] = call double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
+// RV32-NEXT: [[TMP50:%.*]] = call nsz double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
// RV32-NEXT: [[TMP51:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP52:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
-// RV32-NEXT: [[TMP53:%.*]] = call fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
+// RV32-NEXT: [[TMP53:%.*]] = call nsz fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
// RV32-NEXT: [[TMP54:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP55:%.*]] = load float, ptr [[FARG_ADDR]], align 4
-// RV32-NEXT: [[TMP56:%.*]] = call float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
+// RV32-NEXT: [[TMP56:%.*]] = call nsz float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
// RV32-NEXT: [[TMP57:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV32-NEXT: [[TMP58:%.*]] = load double, ptr [[DARG_ADDR]], align 8
-// RV32-NEXT: [[TMP59:%.*]] = call double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
+// RV32-NEXT: [[TMP59:%.*]] = call nsz double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
// RV32-NEXT: [[TMP60:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV32-NEXT: [[TMP61:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
-// RV32-NEXT: [[TMP62:%.*]] = call fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
+// RV32-NEXT: [[TMP62:%.*]] = call nsz fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
// RV32-NEXT: [[TMP63:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[TMP64:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV32-NEXT: [[FMOD:%.*]] = frem float [[TMP63]], [[TMP64]]
@@ -310,22 +310,22 @@ long double truncl(long double);
// RV64-NEXT: [[TMP44:%.*]] = call fp128 @llvm.floor.f128(fp128 [[TMP43]])
// RV64-NEXT: [[TMP45:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP46:%.*]] = load float, ptr [[FARG_ADDR]], align 4
-// RV64-NEXT: [[TMP47:%.*]] = call float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
+// RV64-NEXT: [[TMP47:%.*]] = call nsz float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
// RV64-NEXT: [[TMP48:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP49:%.*]] = load double, ptr [[DARG_ADDR]], align 8
-// RV64-NEXT: [[TMP50:%.*]] = call double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
+// RV64-NEXT: [[TMP50:%.*]] = call nsz double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
// RV64-NEXT: [[TMP51:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP52:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
-// RV64-NEXT: [[TMP53:%.*]] = call fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
+// RV64-NEXT: [[TMP53:%.*]] = call nsz fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
// RV64-NEXT: [[TMP54:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP55:%.*]] = load float, ptr [[FARG_ADDR]], align 4
-// RV64-NEXT: [[TMP56:%.*]] = call float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
+// RV64-NEXT: [[TMP56:%.*]] = call nsz float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
// RV64-NEXT: [[TMP57:%.*]] = load double, ptr [[DARG_ADDR]], align 8
// RV64-NEXT: [[TMP58:%.*]] = load double, ptr [[DARG_ADDR]], align 8
-// RV64-NEXT: [[TMP59:%.*]] = call double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
+// RV64-NEXT: [[TMP59:%.*]] = call nsz double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
// RV64-NEXT: [[TMP60:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
// RV64-NEXT: [[TMP61:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
-// RV64-NEXT: [[TMP62:%.*]] = call fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
+// RV64-NEXT: [[TMP62:%.*]] = call nsz fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
// RV64-NEXT: [[TMP63:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[TMP64:%.*]] = load float, ptr [[FARG_ADDR]], align 4
// RV64-NEXT: [[FMOD:%.*]] = frem float [[TMP63]], [[TMP64]]
24 changes: 12 additions & 12 deletions clang/test/CodeGen/builtins-elementwise-math.c
@@ -348,21 +348,21 @@ void test_builtin_elementwise_max(float f1, float f2, double d1, double d2,
// CHECK-LABEL: define void @test_builtin_elementwise_max(
// CHECK: [[F1:%.+]] = load float, ptr %f1.addr, align 4
// CHECK-NEXT: [[F2:%.+]] = load float, ptr %f2.addr, align 4
-// CHECK-NEXT: call float @llvm.maxnum.f32(float [[F1]], float [[F2]])
+// CHECK-NEXT: call nsz float @llvm.maxnum.f32(float [[F1]], float [[F2]])
f1 = __builtin_elementwise_max(f1, f2);

// CHECK: [[D1:%.+]] = load double, ptr %d1.addr, align 8
// CHECK-NEXT: [[D2:%.+]] = load double, ptr %d2.addr, align 8
-// CHECK-NEXT: call double @llvm.maxnum.f64(double [[D1]], double [[D2]])
+// CHECK-NEXT: call nsz double @llvm.maxnum.f64(double [[D1]], double [[D2]])
d1 = __builtin_elementwise_max(d1, d2);

// CHECK: [[D2:%.+]] = load double, ptr %d2.addr, align 8
-// CHECK-NEXT: call double @llvm.maxnum.f64(double 2.000000e+01, double [[D2]])
+// CHECK-NEXT: call nsz double @llvm.maxnum.f64(double 2.000000e+01, double [[D2]])
d1 = __builtin_elementwise_max(20.0, d2);

// CHECK: [[VF1:%.+]] = load <4 x float>, ptr %vf1.addr, align 16
// CHECK-NEXT: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
-// CHECK-NEXT: call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VF1]], <4 x float> [[VF2]])
+// CHECK-NEXT: call nsz <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VF1]], <4 x float> [[VF2]])
vf1 = __builtin_elementwise_max(vf1, vf2);

// CHECK: [[I1:%.+]] = load i64, ptr %i1.addr, align 8
@@ -405,13 +405,13 @@

// CHECK: [[CVF1:%.+]] = load <4 x float>, ptr %cvf1, align 16
// CHECK-NEXT: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
-// CHECK-NEXT: call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[CVF1]], <4 x float> [[VF2]])
+// CHECK-NEXT: call nsz <4 x float> @llvm.maxnum.v4f32(<4 x float> [[CVF1]], <4 x float> [[VF2]])
const float4 cvf1 = vf1;
vf1 = __builtin_elementwise_max(cvf1, vf2);

// CHECK: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
// CHECK-NEXT: [[CVF1:%.+]] = load <4 x float>, ptr %cvf1, align 16
-// CHECK-NEXT: call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VF2]], <4 x float> [[CVF1]])
+// CHECK-NEXT: call nsz <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VF2]], <4 x float> [[CVF1]])
vf1 = __builtin_elementwise_max(vf2, cvf1);

// CHECK: [[IAS1:%.+]] = load i32, ptr addrspace(1) @int_as_one, align 4
@@ -432,21 +432,21 @@ void test_builtin_elementwise_min(float f1, float f2, double d1, double d2,
// CHECK-LABEL: define void @test_builtin_elementwise_min(
// CHECK: [[F1:%.+]] = load float, ptr %f1.addr, align 4
// CHECK-NEXT: [[F2:%.+]] = load float, ptr %f2.addr, align 4
-// CHECK-NEXT: call float @llvm.minnum.f32(float [[F1]], float [[F2]])
+// CHECK-NEXT: call nsz float @llvm.minnum.f32(float [[F1]], float [[F2]])
f1 = __builtin_elementwise_min(f1, f2);

// CHECK: [[D1:%.+]] = load double, ptr %d1.addr, align 8
// CHECK-NEXT: [[D2:%.+]] = load double, ptr %d2.addr, align 8
-// CHECK-NEXT: call double @llvm.minnum.f64(double [[D1]], double [[D2]])
+// CHECK-NEXT: call nsz double @llvm.minnum.f64(double [[D1]], double [[D2]])
d1 = __builtin_elementwise_min(d1, d2);

// CHECK: [[D1:%.+]] = load double, ptr %d1.addr, align 8
-// CHECK-NEXT: call double @llvm.minnum.f64(double [[D1]], double 2.000000e+00)
+// CHECK-NEXT: call nsz double @llvm.minnum.f64(double [[D1]], double 2.000000e+00)
d1 = __builtin_elementwise_min(d1, 2.0);

// CHECK: [[VF1:%.+]] = load <4 x float>, ptr %vf1.addr, align 16
// CHECK-NEXT: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
-// CHECK-NEXT: call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VF1]], <4 x float> [[VF2]])
+// CHECK-NEXT: call nsz <4 x float> @llvm.minnum.v4f32(<4 x float> [[VF1]], <4 x float> [[VF2]])
vf1 = __builtin_elementwise_min(vf1, vf2);

// CHECK: [[I1:%.+]] = load i64, ptr %i1.addr, align 8
@@ -496,13 +496,13 @@ void test_builtin_elementwise_min(float f1, float f2, double d1, double d2,

// CHECK: [[CVF1:%.+]] = load <4 x float>, ptr %cvf1, align 16
// CHECK-NEXT: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
-// CHECK-NEXT: call <4 x float> @llvm.minnum.v4f32(<4 x float> [[CVF1]], <4 x float> [[VF2]])
+// CHECK-NEXT: call nsz <4 x float> @llvm.minnum.v4f32(<4 x float> [[CVF1]], <4 x float> [[VF2]])
const float4 cvf1 = vf1;
vf1 = __builtin_elementwise_min(cvf1, vf2);

// CHECK: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
// CHECK-NEXT: [[CVF1:%.+]] = load <4 x float>, ptr %cvf1, align 16
-// CHECK-NEXT: call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VF2]], <4 x float> [[CVF1]])
+// CHECK-NEXT: call nsz <4 x float> @llvm.minnum.v4f32(<4 x float> [[VF2]], <4 x float> [[CVF1]])
vf1 = __builtin_elementwise_min(vf2, cvf1);

// CHECK: [[IAS1:%.+]] = load i32, ptr addrspace(1) @int_as_one, align 4
12 changes: 6 additions & 6 deletions clang/test/CodeGen/builtins.c
@@ -371,22 +371,22 @@ void test_float_builtin_ops(float F, double D, long double LD, int I) {
// CHECK: call [[LDTYPE]] @llvm.canonicalize.[[LDLLVMTY]]([[LDTYPE]]

resf = __builtin_fminf(F, F);
-// CHECK: call float @llvm.minnum.f32
+// CHECK: call nsz float @llvm.minnum.f32

resd = __builtin_fmin(D, D);
-// CHECK: call double @llvm.minnum.f64
+// CHECK: call nsz double @llvm.minnum.f64

resld = __builtin_fminl(LD, LD);
-// CHECK: call [[LDTYPE]] @llvm.minnum.[[LDLLVMTY]]
+// CHECK: call nsz [[LDTYPE]] @llvm.minnum.[[LDLLVMTY]]

resf = __builtin_fmaxf(F, F);
-// CHECK: call float @llvm.maxnum.f32
+// CHECK: call nsz float @llvm.maxnum.f32

resd = __builtin_fmax(D, D);
-// CHECK: call double @llvm.maxnum.f64
+// CHECK: call nsz double @llvm.maxnum.f64

resld = __builtin_fmaxl(LD, LD);
-// CHECK: call [[LDTYPE]] @llvm.maxnum.[[LDLLVMTY]]
+// CHECK: call nsz [[LDTYPE]] @llvm.maxnum.[[LDLLVMTY]]

resf = __builtin_fminimum_numf(F, F);
// CHECK: call float @llvm.minimumnum.f32
16 changes: 8 additions & 8 deletions clang/test/CodeGen/constrained-math-builtins.c
@@ -123,17 +123,17 @@ __builtin_atan2(f,f); __builtin_atan2f(f,f); __builtin_atan2l(f,f);

__builtin_fmax(f,f); __builtin_fmaxf(f,f); __builtin_fmaxl(f,f); __builtin_fmaxf128(f,f);

-// CHECK: call double @llvm.experimental.constrained.maxnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call float @llvm.experimental.constrained.maxnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call fp128 @llvm.experimental.constrained.maxnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz double @llvm.experimental.constrained.maxnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz float @llvm.experimental.constrained.maxnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz fp128 @llvm.experimental.constrained.maxnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")

__builtin_fmin(f,f); __builtin_fminf(f,f); __builtin_fminl(f,f); __builtin_fminf128(f,f);

-// CHECK: call double @llvm.experimental.constrained.minnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call float @llvm.experimental.constrained.minnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call fp128 @llvm.experimental.constrained.minnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz double @llvm.experimental.constrained.minnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz float @llvm.experimental.constrained.minnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz fp128 @llvm.experimental.constrained.minnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")

__builtin_llrint(f); __builtin_llrintf(f); __builtin_llrintl(f); __builtin_llrintf128(f);

16 changes: 8 additions & 8 deletions clang/test/CodeGen/math-builtins-long.c
@@ -148,16 +148,16 @@ void foo(long double f, long double *l, int *i, const char *c) {
// PPCF128: call fp128 @llvm.floor.f128(fp128 %{{.+}})
__builtin_floorl(f);

-// F80: call x86_fp80 @llvm.maxnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}})
-// PPC: call ppc_fp128 @llvm.maxnum.ppcf128(ppc_fp128 %{{.+}}, ppc_fp128 %{{.+}})
-// X86F128: call fp128 @llvm.maxnum.f128(fp128 %{{.+}}, fp128 %{{.+}})
-// PPCF128: call fp128 @llvm.maxnum.f128(fp128 %{{.+}}, fp128 %{{.+}})
+// F80: call nsz x86_fp80 @llvm.maxnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}})
+// PPC: call nsz ppc_fp128 @llvm.maxnum.ppcf128(ppc_fp128 %{{.+}}, ppc_fp128 %{{.+}})
+// X86F128: call nsz fp128 @llvm.maxnum.f128(fp128 %{{.+}}, fp128 %{{.+}})
+// PPCF128: call nsz fp128 @llvm.maxnum.f128(fp128 %{{.+}}, fp128 %{{.+}})
__builtin_fmaxl(f,f);

-// F80: call x86_fp80 @llvm.minnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}})
-// PPC: call ppc_fp128 @llvm.minnum.ppcf128(ppc_fp128 %{{.+}}, ppc_fp128 %{{.+}})
-// X86F128: call fp128 @llvm.minnum.f128(fp128 %{{.+}}, fp128 %{{.+}})
-// PPCF128: call fp128 @llvm.minnum.f128(fp128 %{{.+}}, fp128 %{{.+}})
+// F80: call nsz x86_fp80 @llvm.minnum.f80(x86_fp80 %{{.+}}, x86_fp80 %{{.+}})
+// PPC: call nsz ppc_fp128 @llvm.minnum.ppcf128(ppc_fp128 %{{.+}}, ppc_fp128 %{{.+}})
+// X86F128: call nsz fp128 @llvm.minnum.f128(fp128 %{{.+}}, fp128 %{{.+}})
+// PPCF128: call nsz fp128 @llvm.minnum.f128(fp128 %{{.+}}, fp128 %{{.+}})
__builtin_fminl(f,f);

// F80: call x86_fp80 @llvm.nearbyint.f80(x86_fp80 %{{.+}})