[FPEnv] Use strictfp metadata in casting nodes

The strictfp metadata was added to the casting AST nodes in D85960, but
we aren't using that metadata yet. This patch adds that support.

To avoid lots of ad hoc passing around of the strictfp bits, I updated
the IRBuilder's state when moving from a function that has the Expr* to
a function that lacks it. I believe we should switch to this pattern to
keep the strictfp support from becoming overly invasive.
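
To make that concrete, here is a minimal standalone sketch of the
save/install/restore behavior. The types and names below are invented
stand-ins for illustration, not the real Clang classes; the actual
CGFPOptionsRAII changes are in the CodeGenFunction.{h,cpp} hunks below.

  #include <cassert>

  // Hypothetical stand-in for the exception-behavior state.
  enum class ExceptKind { Ignore, MayTrap, Strict };

  struct BuilderState { ExceptKind DefaultExcept = ExceptKind::Ignore; };

  struct CodeGenState {
    ExceptKind CurFPFeatures = ExceptKind::Ignore;
    BuilderState Builder;
  };

  // Mirrors the CGFPOptionsRAII idea: the constructor saves the current
  // state and installs the expression's FP options; the destructor
  // restores everything, so callees never need to see the Expr*.
  class FPOptionsRAII {
    CodeGenState &CGF;
    ExceptKind OldFeatures, OldExcept;
  public:
    FPOptionsRAII(CodeGenState &CGF, ExceptKind New)
        : CGF(CGF), OldFeatures(CGF.CurFPFeatures),
          OldExcept(CGF.Builder.DefaultExcept) {
      CGF.CurFPFeatures = New;
      CGF.Builder.DefaultExcept = New;
    }
    ~FPOptionsRAII() {
      CGF.CurFPFeatures = OldFeatures;
      CGF.Builder.DefaultExcept = OldExcept;
    }
  };

  int main() {
    CodeGenState CGF;
    {
      FPOptionsRAII Guard(CGF, ExceptKind::Strict);
      assert(CGF.Builder.DefaultExcept == ExceptKind::Strict);
    } // scope ends: builder state is restored
    assert(CGF.Builder.DefaultExcept == ExceptKind::Ignore);
    return 0;
  }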

To verify that we're picking up the right metadata, I also made my tests
use a pragma so that the AST's strictfp metadata does not match the
global strictfp metadata. This exposes issues that we need to deal with
in subsequent patches, and I believe it is the right approach for almost
all of our clang strictfp tests.
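
The trick, roughly (this is what the new and updated tests below do; the
function here is only an illustration, not one of the tests):

  // Compiled with a non-strict global mode, e.g.:
  //   clang -cc1 -ffp-exception-behavior=maytrap ...
  #pragma float_control(except, on) // local override, recorded in the AST

  double f(float x) {
    // Should emit @llvm.experimental.constrained.fpext with
    // !"fpexcept.strict" taken from the AST, not the command line's
    // !"fpexcept.maytrap".
    return x;
  }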

Differential Revision: https://reviews.llvm.org/D88913
Kevin P. Neal 2020-11-06 10:44:13 -05:00
parent d0b8810fe4
commit 2069403cdf
13 changed files with 1058 additions and 36 deletions


@@ -187,6 +187,7 @@ llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
QualType BoolTy = getContext().BoolTy;
SourceLocation Loc = E->getExprLoc();
CGFPOptionsRAII FPOptsRAII(*this, E);
if (!E->getType()->isAnyComplexType())
return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);


@@ -536,17 +536,21 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
llvm_unreachable("invalid cast kind for complex value");
case CK_FloatingRealToComplex:
case CK_IntegralRealToComplex:
case CK_IntegralRealToComplex: {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Op);
return EmitScalarToComplexCast(CGF.EmitScalarExpr(Op), Op->getType(),
DestTy, Op->getExprLoc());
}
case CK_FloatingComplexCast:
case CK_FloatingComplexToIntegralComplex:
case CK_IntegralComplexCast:
case CK_IntegralComplexToFloatingComplex:
case CK_IntegralComplexToFloatingComplex: {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Op);
return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy,
Op->getExprLoc());
}
}
llvm_unreachable("unknown cast resulting in complex value");
}
@@ -900,6 +904,7 @@ EmitCompoundAssignLValue(const CompoundAssignOperator *E,
if (const AtomicType *AT = LHSTy->getAs<AtomicType>())
LHSTy = AT->getValueType();
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
BinOpInfo OpInfo;
// Load the RHS and LHS operands.


@@ -2235,9 +2235,11 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_FloatingToIntegral:
case CK_FloatingCast:
case CK_FixedPointToFloating:
case CK_FloatingToFixedPoint:
case CK_FloatingToFixedPoint: {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
return EmitScalarConversion(Visit(E), E->getType(), DestTy,
CE->getExprLoc());
}
case CK_BooleanToSignedIntegral: {
ScalarConversionOpts Opts;
Opts.TreatBooleanAsSigned = true;
@@ -2248,8 +2250,10 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return EmitIntToBoolConversion(Visit(E));
case CK_PointerToBoolean:
return EmitPointerToBoolConversion(Visit(E), E->getType());
case CK_FloatingToBoolean:
case CK_FloatingToBoolean: {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
return EmitFloatToBoolConversion(Visit(E));
}
case CK_MemberPointerToBoolean: {
llvm::Value *MemPtr = Visit(E);
const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();


@@ -25,6 +25,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
@@ -130,11 +131,25 @@ void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
Builder.setFastMathFlags(FMF);
}
CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
const Expr *E)
: CGF(CGF) {
ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
}
CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
FPOptions FPFeatures)
: CGF(CGF), OldFPFeatures(CGF.CurFPFeatures) {
: CGF(CGF) {
ConstructorHelper(FPFeatures);
}
void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
OldFPFeatures = CGF.CurFPFeatures;
CGF.CurFPFeatures = FPFeatures;
OldExcept = CGF.Builder.getDefaultConstrainedExcept();
OldRounding = CGF.Builder.getDefaultConstrainedRounding();
if (OldFPFeatures == FPFeatures)
return;
@@ -175,6 +190,8 @@ CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
CGF.CurFPFeatures = OldFPFeatures;
CGF.Builder.setDefaultConstrainedExcept(OldExcept);
CGF.Builder.setDefaultConstrainedRounding(OldRounding);
}
LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
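
For reference, the IRBuilder state saved and restored above is what lets
later builder calls emit the right constrained intrinsics without seeing
the Expr*. A minimal sketch against the LLVM IRBuilder API (not Clang
code; the function name is invented for illustration):

  #include "llvm/IR/IRBuilder.h"

  // With the builder's constrained defaults set, a plain CreateFAdd
  // emits @llvm.experimental.constrained.fadd carrying those defaults.
  llvm::Value *emitConstrainedAdd(llvm::IRBuilder<> &B, llvm::Value *L,
                                  llvm::Value *R) {
    B.setIsFPConstrained(true);
    B.setDefaultConstrainedExcept(llvm::fp::ebStrict);
    B.setDefaultConstrainedRounding(llvm::RoundingMode::Dynamic);
    return B.CreateFAdd(L, R); // no Expr* needed at this point
  }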


@@ -628,11 +628,15 @@ public:
class CGFPOptionsRAII {
public:
CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures);
CGFPOptionsRAII(CodeGenFunction &CGF, const Expr *E);
~CGFPOptionsRAII();
private:
void ConstructorHelper(FPOptions FPFeatures);
CodeGenFunction &CGF;
FPOptions OldFPFeatures;
llvm::fp::ExceptionBehavior OldExcept;
llvm::RoundingMode OldRounding;
Optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
};
FPOptions CurFPFeatures;


@@ -697,7 +697,7 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
// If T is cv std::nullptr_t, the result is a null pointer constant.
CastKind CK = T->isNullPtrType() ? CK_NullToPointer : CK_LValueToRValue;
Res = ImplicitCastExpr::Create(Context, T, CK, E, nullptr, VK_RValue,
FPOptionsOverride());
CurFPFeatureOverrides());
// C11 6.3.2.1p2:
// ... if the lvalue has atomic type, the value has the non-atomic version


@@ -3,7 +3,7 @@
// RUN: | opt -S -mem2reg \
// RUN: | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon -target-feature +fullfp16 -target-feature +v8.2a\
// RUN: -ffp-exception-behavior=strict \
// RUN: -ffp-exception-behavior=maytrap -DEXCEPT=1 \
// RUN: -fexperimental-strict-floating-point \
// RUN: -fallow-half-arguments-and-returns -flax-vector-conversions=none -S -disable-O0-optnone -emit-llvm -o - %s \
// RUN: | opt -S -mem2reg \
@@ -13,19 +13,27 @@
// RUN: | opt -S -mem2reg | llc -o=- - \
// RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon -target-feature +fullfp16 -target-feature +v8.2a\
// RUN: -ffp-exception-behavior=strict \
// RUN: -ffp-exception-behavior=maytrap -DEXCEPT=1 \
// RUN: -fexperimental-strict-floating-point \
// RUN: -fallow-half-arguments-and-returns -flax-vector-conversions=none -S -disable-O0-optnone -emit-llvm -o - %s \
// RUN: | opt -S -mem2reg | llc -o=- - \
// RUN: | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
//
// REQUIRES: aarch64-registered-target
// Test that the constrained intrinsics are picking up the exception
// metadata from the AST instead of the global default from the command line.
// FIXME: All cases of "fpexcept.maytrap" in this test are wrong.
#if EXCEPT
#pragma float_control(except, on)
#endif
#include <arm_neon.h>
// COMMON-LABEL: test_vsqrt_f16
// UNCONSTRAINED: [[SQR:%.*]] = call <4 x half> @llvm.sqrt.v4f16(<4 x half> %a)
// CONSTRAINED: [[SQR:%.*]] = call <4 x half> @llvm.experimental.constrained.sqrt.v4f16(<4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[SQR:%.*]] = call <4 x half> @llvm.experimental.constrained.sqrt.v4f16(<4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fsqrt v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
// COMMONIR: ret <4 x half> [[SQR]]
float16x4_t test_vsqrt_f16(float16x4_t a) {
@@ -34,7 +42,7 @@ float16x4_t test_vsqrt_f16(float16x4_t a) {
// COMMON-LABEL: test_vsqrtq_f16
// UNCONSTRAINED: [[SQR:%.*]] = call <8 x half> @llvm.sqrt.v8f16(<8 x half> %a)
// CONSTRAINED: [[SQR:%.*]] = call <8 x half> @llvm.experimental.constrained.sqrt.v8f16(<8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[SQR:%.*]] = call <8 x half> @llvm.experimental.constrained.sqrt.v8f16(<8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fsqrt v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
// COMMONIR: ret <8 x half> [[SQR]]
float16x8_t test_vsqrtq_f16(float16x8_t a) {
@@ -43,7 +51,7 @@ float16x8_t test_vsqrtq_f16(float16x8_t a) {
// COMMON-LABEL: test_vfma_f16
// UNCONSTRAINED: [[ADD:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %a)
// CONSTRAINED: [[ADD:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[ADD:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
// COMMONIR: ret <4 x half> [[ADD]]
float16x4_t test_vfma_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
@@ -52,7 +60,7 @@ float16x4_t test_vfma_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
// COMMON-LABEL: test_vfmaq_f16
// UNCONSTRAINED: [[ADD:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %a)
// CONSTRAINED: [[ADD:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[ADD:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
// COMMONIR: ret <8 x half> [[ADD]]
float16x8_t test_vfmaq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
@@ -63,7 +71,7 @@ float16x8_t test_vfmaq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
// COMMONIR: [[SUB:%.*]] = fneg <4 x half> %b
// CHECK-ASM: fneg v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
// UNCONSTRAINED: [[ADD:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[SUB]], <4 x half> %c, <4 x half> %a)
// CONSTRAINED: [[ADD:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[SUB]], <4 x half> %c, <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[ADD:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[SUB]], <4 x half> %c, <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
// COMMONIR: ret <4 x half> [[ADD]]
float16x4_t test_vfms_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
@@ -74,7 +82,7 @@ float16x4_t test_vfms_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
// COMMONIR: [[SUB:%.*]] = fneg <8 x half> %b
// CHECK-ASM: fneg v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
// UNCONSTRAINED: [[ADD:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[SUB]], <8 x half> %c, <8 x half> %a)
// CONSTRAINED: [[ADD:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[SUB]], <8 x half> %c, <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[ADD:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[SUB]], <8 x half> %c, <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
// COMMONIR: ret <8 x half> [[ADD]]
float16x8_t test_vfmsq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
@@ -90,7 +98,7 @@ float16x8_t test_vfmsq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
// COMMONIR: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
// COMMONIR: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// UNCONSTRAINED: [[FMLA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]])
// CONSTRAINED: [[FMLA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[FMLA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.h[{{[0-9]+}}]
// COMMONIR: ret <4 x half> [[FMLA]]
float16x4_t test_vfma_lane_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
@@ -106,7 +114,7 @@ float16x4_t test_vfma_lane_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
// COMMONIR: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
// COMMONIR: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// UNCONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]])
// CONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.h[{{[0-9]+}}]
// COMMONIR: ret <8 x half> [[FMLA]]
float16x8_t test_vfmaq_lane_f16(float16x8_t a, float16x8_t b, float16x4_t c) {
@@ -122,7 +130,7 @@ float16x8_t test_vfmaq_lane_f16(float16x8_t a, float16x8_t b, float16x4_t c) {
// COMMONIR: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
// COMMONIR: [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <4 x i32> <i32 7, i32 7, i32 7, i32 7>
// UNCONSTRAINED: [[FMLA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]])
// CONSTRAINED: [[FMLA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[FMLA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.h[{{[0-9]+}}]
// COMMONIR: ret <4 x half> [[FMLA]]
float16x4_t test_vfma_laneq_f16(float16x4_t a, float16x4_t b, float16x8_t c) {
@@ -138,7 +146,7 @@ float16x4_t test_vfma_laneq_f16(float16x4_t a, float16x4_t b, float16x8_t c) {
// COMMONIR: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
// COMMONIR: [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
// UNCONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]])
// CONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.h[{{[0-9]+}}]
// COMMONIR: ret <8 x half> [[FMLA]]
float16x8_t test_vfmaq_laneq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
@@ -151,7 +159,7 @@ float16x8_t test_vfmaq_laneq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
// COMMONIR: [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %c, i32 2
// COMMONIR: [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %c, i32 3
// UNCONSTRAINED: [[FMA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> %b, <4 x half> [[TMP3]], <4 x half> %a)
// CONSTRAINED: [[FMA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> %b, <4 x half> [[TMP3]], <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[FMA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> %b, <4 x half> [[TMP3]], <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmla v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.h[{{[0-9]+}}]
// COMMONIR: ret <4 x half> [[FMA]]
float16x4_t test_vfma_n_f16(float16x4_t a, float16x4_t b, float16_t c) {
@@ -168,7 +176,7 @@ float16x4_t test_vfma_n_f16(float16x4_t a, float16x4_t b, float16_t c) {
// COMMONIR: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %c, i32 6
// COMMONIR: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %c, i32 7
// UNCONSTRAINED: [[FMA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> %b, <8 x half> [[TMP7]], <8 x half> %a)
// CONSTRAINED: [[FMA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> %b, <8 x half> [[TMP7]], <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[FMA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> %b, <8 x half> [[TMP7]], <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmla v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.h[{{[0-9]+}}]
// COMMONIR: ret <8 x half> [[FMA]]
float16x8_t test_vfmaq_n_f16(float16x8_t a, float16x8_t b, float16_t c) {
@@ -178,7 +186,7 @@ float16x8_t test_vfmaq_n_f16(float16x8_t a, float16x8_t b, float16_t c) {
// COMMON-LABEL: test_vfmah_lane_f16
// COMMONIR: [[EXTR:%.*]] = extractelement <4 x half> %c, i32 3
// UNCONSTRAINED: [[FMA:%.*]] = call half @llvm.fma.f16(half %b, half [[EXTR]], half %a)
// CONSTRAINED: [[FMA:%.*]] = call half @llvm.experimental.constrained.fma.f16(half %b, half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[FMA:%.*]] = call half @llvm.experimental.constrained.fma.f16(half %b, half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmla h{{[0-9]+}}, h{{[0-9]+}}, v{{[0-9]+}}.h[{{[0-9]+}}]
// COMMONIR: ret half [[FMA]]
float16_t test_vfmah_lane_f16(float16_t a, float16_t b, float16x4_t c) {
@@ -188,7 +196,7 @@ float16_t test_vfmah_lane_f16(float16_t a, float16_t b, float16x4_t c) {
// COMMON-LABEL: test_vfmah_laneq_f16
// COMMONIR: [[EXTR:%.*]] = extractelement <8 x half> %c, i32 7
// UNCONSTRAINED: [[FMA:%.*]] = call half @llvm.fma.f16(half %b, half [[EXTR]], half %a)
// CONSTRAINED: [[FMA:%.*]] = call half @llvm.experimental.constrained.fma.f16(half %b, half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[FMA:%.*]] = call half @llvm.experimental.constrained.fma.f16(half %b, half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmla h{{[0-9]+}}, h{{[0-9]+}}, v{{[0-9]+}}.h[{{[0-9]+}}]
// COMMONIR: ret half [[FMA]]
float16_t test_vfmah_laneq_f16(float16_t a, float16_t b, float16x8_t c) {
@@ -205,7 +213,7 @@ float16_t test_vfmah_laneq_f16(float16_t a, float16_t b, float16x8_t c) {
// COMMONIR: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
// COMMONIR: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
// UNCONSTRAINED: [[FMA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]])
// CONSTRAINED: [[FMA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[FMA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[TMP4]], <4 x half> [[LANE]], <4 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmls v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.h[{{[0-9]+}}]
// COMMONIR: ret <4 x half> [[FMA]]
float16x4_t test_vfms_lane_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
@@ -222,7 +230,7 @@ float16x4_t test_vfms_lane_f16(float16x4_t a, float16x4_t b, float16x4_t c) {
// COMMONIR: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
// COMMONIR: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
// UNCONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]])
// CONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[TMP4]], <8 x half> [[LANE]], <8 x half> [[TMP5]], metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmls v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.h[{{[0-9]+}}]
// COMMONIR: ret <8 x half> [[FMLA]]
float16x8_t test_vfmsq_lane_f16(float16x8_t a, float16x8_t b, float16x4_t c) {
@@ -240,7 +248,7 @@ float16x8_t test_vfmsq_lane_f16(float16x8_t a, float16x8_t b, float16x4_t c) {
// COMMONIR: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
// COMMONIR: [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <4 x i32> <i32 7, i32 7, i32 7, i32 7>
// UNCONSTRAINED: [[FMLA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]])
// CONSTRAINED: [[FMLA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[FMLA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[LANE]], <4 x half> [[TMP4]], <4 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmls v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.h[{{[0-9]+}}]
// COMMONIR: ret <4 x half> [[FMLA]]
float16x4_t test_vfms_laneq_f16(float16x4_t a, float16x4_t b, float16x8_t c) {
@@ -258,7 +266,7 @@ float16x4_t test_vfms_laneq_f16(float16x4_t a, float16x4_t b, float16x8_t c) {
// COMMONIR: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half>
// COMMONIR: [[LANE:%.*]] = shufflevector <8 x half> [[TMP5]], <8 x half> [[TMP5]], <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
// UNCONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]])
// CONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[FMLA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[LANE]], <8 x half> [[TMP4]], <8 x half> [[TMP3]], metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmls v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.h[{{[0-9]+}}]
// COMMONIR: ret <8 x half> [[FMLA]]
float16x8_t test_vfmsq_laneq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
@@ -272,7 +280,7 @@ float16x8_t test_vfmsq_laneq_f16(float16x8_t a, float16x8_t b, float16x8_t c) {
// COMMONIR: [[TMP2:%.*]] = insertelement <4 x half> [[TMP1]], half %c, i32 2
// COMMONIR: [[TMP3:%.*]] = insertelement <4 x half> [[TMP2]], half %c, i32 3
// UNCONSTRAINED: [[FMA:%.*]] = call <4 x half> @llvm.fma.v4f16(<4 x half> [[SUB]], <4 x half> [[TMP3]], <4 x half> %a)
// CONSTRAINED: [[FMA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[SUB]], <4 x half> [[TMP3]], <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[FMA:%.*]] = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> [[SUB]], <4 x half> [[TMP3]], <4 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmls v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.h[{{[0-9]+}}]
// COMMONIR: ret <4 x half> [[FMA]]
float16x4_t test_vfms_n_f16(float16x4_t a, float16x4_t b, float16_t c) {
@@ -290,7 +298,7 @@ float16x4_t test_vfms_n_f16(float16x4_t a, float16x4_t b, float16_t c) {
// COMMONIR: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half %c, i32 6
// COMMONIR: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half %c, i32 7
// UNCONSTRAINED: [[FMA:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[SUB]], <8 x half> [[TMP7]], <8 x half> %a)
// CONSTRAINED: [[FMA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[SUB]], <8 x half> [[TMP7]], <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[FMA:%.*]] = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> [[SUB]], <8 x half> [[TMP7]], <8 x half> %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmls v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.h[{{[0-9]+}}]
// COMMONIR: ret <8 x half> [[FMA]]
float16x8_t test_vfmsq_n_f16(float16x8_t a, float16x8_t b, float16_t c) {
@@ -308,7 +316,7 @@ float16x8_t test_vfmsq_n_f16(float16x8_t a, float16x8_t b, float16_t c) {
// CHECK-ASM: fcvt h{{[0-9]+}}, s{{[0-9]+}}
// COMMONIR: [[EXTR:%.*]] = extractelement <4 x half> %c, i32 3
// UNCONSTRAINED: [[FMA:%.*]] = call half @llvm.fma.f16(half [[SUB]], half [[EXTR]], half %a)
// CONSTRAINED: [[FMA:%.*]] = call half @llvm.experimental.constrained.fma.f16(half [[SUB]], half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[FMA:%.*]] = call half @llvm.experimental.constrained.fma.f16(half [[SUB]], half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmla h{{[0-9]+}}, h{{[0-9]+}}, v{{[0-9]+}}.h[{{[0-9]+}}]
// COMMONIR: ret half [[FMA]]
float16_t test_vfmsh_lane_f16(float16_t a, float16_t b, float16x4_t c) {
@@ -326,10 +334,9 @@ float16_t test_vfmsh_lane_f16(float16_t a, float16_t b, float16x4_t c) {
// CHECK-ASM: fcvt h{{[0-9]+}}, s{{[0-9]+}}
// COMMONIR: [[EXTR:%.*]] = extractelement <8 x half> %c, i32 7
// UNCONSTRAINED: [[FMA:%.*]] = call half @llvm.fma.f16(half [[SUB]], half [[EXTR]], half %a)
// CONSTRAINED: [[FMA:%.*]] = call half @llvm.experimental.constrained.fma.f16(half [[SUB]], half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CONSTRAINED: [[FMA:%.*]] = call half @llvm.experimental.constrained.fma.f16(half [[SUB]], half [[EXTR]], half %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK-ASM: fmla h{{[0-9]+}}, h{{[0-9]+}}, v{{[0-9]+}}.h[{{[0-9]+}}]
// COMMONIR: ret half [[FMA]]
float16_t test_vfmsh_laneq_f16(float16_t a, float16_t b, float16x8_t c) {
return vfmsh_laneq_f16(a, b, c, 7);
}


@@ -0,0 +1,50 @@
// RUN: %clang_cc1 -emit-llvm -triple x86_64-windows-pc -ffp-exception-behavior=maytrap -o - %s | FileCheck %s --check-prefixes=CHECK,FP16
// RUN: %clang_cc1 -emit-llvm -triple ppc64-be -ffp-exception-behavior=maytrap -o - %s | FileCheck %s --check-prefixes=CHECK,NOFP16
// test to ensure that these builtins don't do the variadic promotion of float->double.
// Test that the constrained intrinsics are picking up the exception
// metadata from the AST instead of the global default from the command line.
// FIXME: All cases of "fpexcept.maytrap" in this test are wrong.
#pragma float_control(except, on)
// CHECK-LABEL: @test_half
void test_half(__fp16 *H, __fp16 *H2) {
(void)__builtin_isgreater(*H, *H2);
// FP16: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// FP16: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.maytrap")
// CHECK-NEXT: zext i1
(void)__builtin_isinf(*H);
// NOFP16: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.maytrap")
// FP16: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half 0xH7C00, metadata !"oeq", metadata !"fpexcept.maytrap")
}
// CHECK-LABEL: @test_mixed
void test_mixed(double d1, float f2) {
(void)__builtin_isgreater(d1, f2);
// CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"ogt", metadata !"fpexcept.maytrap")
// CHECK-NEXT: zext i1
(void)__builtin_isgreaterequal(d1, f2);
// CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"oge", metadata !"fpexcept.maytrap")
// CHECK-NEXT: zext i1
(void)__builtin_isless(d1, f2);
// CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"olt", metadata !"fpexcept.maytrap")
// CHECK-NEXT: zext i1
(void)__builtin_islessequal(d1, f2);
// CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"ole", metadata !"fpexcept.maytrap")
// CHECK-NEXT: zext i1
(void)__builtin_islessgreater(d1, f2);
// CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"one", metadata !"fpexcept.maytrap")
// CHECK-NEXT: zext i1
(void)__builtin_isunordered(d1, f2);
// CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"uno", metadata !"fpexcept.maytrap")
// CHECK-NEXT: zext i1
}


@@ -0,0 +1,163 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -ffp-exception-behavior=maytrap -emit-llvm -o - %s | FileCheck %s
// Test that the constrained intrinsics are picking up the exception
// metadata from the AST instead of the global default from the command line.
// Include rounding metadata in the testing.
// FIXME: All cases of "fpexcept.maytrap" in this test are wrong.
// FIXME: All cases of "round.tonearest" in this test are wrong.
#pragma float_control(except, on)
#pragma STDC FENV_ROUND FE_UPWARD
_Complex double g1, g2;
_Complex float cf;
double D;
// CHECK-LABEL: @test3a(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load double, double* @D, align 8
// CHECK-NEXT: [[CF_REAL:%.*]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 0), align 4
// CHECK-NEXT: [[CF_IMAG:%.*]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 1), align 4
// CHECK-NEXT: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CF_REAL]], metadata !"fpexcept.strict") [[ATTR2:#.*]]
// CHECK-NEXT: [[CONV1:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CF_IMAG]], metadata !"fpexcept.strict") [[ATTR2]]
// CHECK-NEXT: [[ADD_R:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[CONV]], double [[TMP0]], metadata !"round.upward", metadata !"fpexcept.strict") [[ATTR2]]
// CHECK-NEXT: [[CONV2:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[ADD_R]], metadata !"round.upward", metadata !"fpexcept.strict") [[ATTR2]]
// CHECK-NEXT: [[CONV3:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[CONV1]], metadata !"round.upward", metadata !"fpexcept.strict") [[ATTR2]]
// CHECK-NEXT: store float [[CONV2]], float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 0), align 4
// CHECK-NEXT: store float [[CONV3]], float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 1), align 4
// CHECK-NEXT: ret void
//
void test3a() {
cf += D;
}
// CHECK-LABEL: @test3b(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CF_REAL:%.*]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 0), align 4
// CHECK-NEXT: [[CF_IMAG:%.*]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 1), align 4
// CHECK-NEXT: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CF_REAL]], metadata !"fpexcept.strict") [[ATTR2]]
// CHECK-NEXT: [[CONV1:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CF_IMAG]], metadata !"fpexcept.strict") [[ATTR2]]
// CHECK-NEXT: [[TMP0:%.*]] = load double, double* @D, align 8
// CHECK-NEXT: [[ADD_R:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP0]], double [[CONV]], metadata !"round.upward", metadata !"fpexcept.strict") [[ATTR2]]
// CHECK-NEXT: store double [[ADD_R]], double* @D, align 8
// CHECK-NEXT: ret void
//
void test3b() {
D += cf;
}
// CHECK-LABEL: @test3c(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[G1_REAL:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
// CHECK-NEXT: [[G1_IMAG:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
// CHECK-NEXT: [[CF_REAL:%.*]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 0), align 4
// CHECK-NEXT: [[CF_IMAG:%.*]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 1), align 4
// CHECK-NEXT: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CF_REAL]], metadata !"fpexcept.strict") [[ATTR2]]
// CHECK-NEXT: [[CONV1:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CF_IMAG]], metadata !"fpexcept.strict") [[ATTR2]]
// CHECK-NEXT: [[CALL:%.*]] = call { double, double } @__divdc3(double [[CONV]], double [[CONV1]], double [[G1_REAL]], double [[G1_IMAG]]) [[ATTR3:#.*]]
// CHECK-NEXT: [[TMP0:%.*]] = extractvalue { double, double } [[CALL]], 0
// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { double, double } [[CALL]], 1
// CHECK-NEXT: [[CONV2:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP0]], metadata !"round.upward", metadata !"fpexcept.strict") [[ATTR2]]
// CHECK-NEXT: [[CONV3:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP1]], metadata !"round.upward", metadata !"fpexcept.strict") [[ATTR2]]
// CHECK-NEXT: store float [[CONV2]], float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 0), align 4
// CHECK-NEXT: store float [[CONV3]], float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 1), align 4
// CHECK-NEXT: ret void
//
void test3c() {
cf /= g1;
}
// CHECK-LABEL: @test3d(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[G1_REAL:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
// CHECK-NEXT: [[G1_IMAG:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, double* @D, align 8
// CHECK-NEXT: [[ADD_R:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[G1_REAL]], double [[TMP0]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR2]]
// CHECK-NEXT: store double [[ADD_R]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
// CHECK-NEXT: store double [[G1_IMAG]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
// CHECK-NEXT: ret void
//
void test3d() {
g1 = g1 + D;
}
// CHECK-LABEL: @test3e(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load double, double* @D, align 8
// CHECK-NEXT: [[G1_REAL:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
// CHECK-NEXT: [[G1_IMAG:%.*]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
// CHECK-NEXT: [[ADD_R:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP0]], double [[G1_REAL]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") [[ATTR2]]
// CHECK-NEXT: store double [[ADD_R]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 0), align 8
// CHECK-NEXT: store double [[G1_IMAG]], double* getelementptr inbounds ({ double, double }, { double, double }* @g1, i32 0, i32 1), align 8
// CHECK-NEXT: ret void
//
void test3e() {
g1 = D + g1;
}
// CHECK-LABEL: @t1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CONV:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double 4.000000e+00, metadata !"round.upward", metadata !"fpexcept.strict") [[ATTR2]]
// CHECK-NEXT: store float [[CONV]], float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 0), align 4
// CHECK-NEXT: ret void
//
void t1() {
(__real__ cf) = 4.0;
}
// CHECK-LABEL: @t2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CONV:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double 4.000000e+00, metadata !"round.upward", metadata !"fpexcept.strict") [[ATTR2]]
// CHECK-NEXT: store float [[CONV]], float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 1), align 4
// CHECK-NEXT: ret void
//
void t2() {
(__imag__ cf) = 4.0;
}
// CHECK-LABEL: @t91(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[C:%.*]] = alloca [0 x i8], align 1
// CHECK-NEXT: br i1 false, label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float 2.000000e+00, metadata !"fpexcept.strict") [[ATTR2]]
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[CONV1:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float 2.000000e+00, metadata !"fpexcept.strict") [[ATTR2]]
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND_R:%.*]] = phi double [ [[CONV]], [[COND_TRUE]] ], [ [[CONV1]], [[COND_FALSE]] ]
// CHECK-NEXT: [[COND_I:%.*]] = phi double [ 0.000000e+00, [[COND_TRUE]] ], [ 0.000000e+00, [[COND_FALSE]] ]
// CHECK-NEXT: ret void
//
void t91() {
// Check for proper type promotion of conditional expression
char c[(int)(sizeof(typeof((0 ? 2.0f : (_Complex double) 2.0f))) - sizeof(_Complex double))];
// Check for proper codegen
(0 ? 2.0f : (_Complex double) 2.0f);
}
// CHECK-LABEL: @t92(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[C:%.*]] = alloca [0 x i8], align 1
// CHECK-NEXT: br i1 false, label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float 2.000000e+00, metadata !"fpexcept.strict") [[ATTR2]]
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[CONV1:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float 2.000000e+00, metadata !"fpexcept.strict") [[ATTR2]]
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND_R:%.*]] = phi double [ [[CONV]], [[COND_TRUE]] ], [ [[CONV1]], [[COND_FALSE]] ]
// CHECK-NEXT: [[COND_I:%.*]] = phi double [ 0.000000e+00, [[COND_TRUE]] ], [ 0.000000e+00, [[COND_FALSE]] ]
// CHECK-NEXT: ret void
//
void t92() {
// Check for proper type promotion of conditional expression
char c[(int)(sizeof(typeof((0 ? (_Complex double) 2.0f : 2.0f))) - sizeof(_Complex double))];
// Check for proper codegen
(0 ? (_Complex double) 2.0f : 2.0f);
}


@@ -1,6 +1,11 @@
// RUN: %clang_cc1 -triple x86_64-linux -ffp-exception-behavior=strict -w -S -o - -emit-llvm %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-linux -ffp-exception-behavior=maytrap -w -S -o - -emit-llvm %s | FileCheck %s
// Test codegen of constrained math builtins.
//
// Test that the constrained intrinsics are picking up the exception
// metadata from the AST instead of the global default from the command line.
#pragma float_control(except, on)
void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {
f = __builtin_fmod(f,f); f = __builtin_fmodf(f,f); f = __builtin_fmodl(f,f); f = __builtin_fmodf128(f,f);
@@ -177,9 +182,9 @@ void bar(float f) {
(double)f * f - f;
(long double)-f * f + f;
// CHECK: call float @llvm.experimental.constrained.fmuladd.f32
// CHECK: call float @llvm.experimental.constrained.fmuladd.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: fneg
// CHECK: call double @llvm.experimental.constrained.fmuladd.f64
// CHECK: call double @llvm.experimental.constrained.fmuladd.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: fneg
// CHECK: call x86_fp80 @llvm.experimental.constrained.fmuladd.f80
// CHECK: call x86_fp80 @llvm.experimental.constrained.fmuladd.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
};


@@ -0,0 +1,17 @@
// RUN: %clang_cc1 -triple x86_64-unknown-unknown %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown %s -ffp-exception-behavior=maytrap -emit-llvm -o - | FileCheck %s
// Test codegen of constrained floating point to bool conversion
//
// Test that the constrained intrinsics are picking up the exception
// metadata from the AST instead of the global default from the command line.
#pragma float_control(except, on)
void eMaisUma() {
double t[1];
if (*t)
return;
// CHECK: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double 0.000000e+00, metadata !"une", metadata !"fpexcept.strict")
}


@@ -0,0 +1,741 @@
// REQUIRES: arm-registered-target
// RUN: %clang_cc1 -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -emit-llvm -o - -triple arm-none-linux-gnueabi %s | FileCheck %s --check-prefix=NOTNATIVE --check-prefix=CHECK -vv -dump-input=fail
// RUN: %clang_cc1 -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -emit-llvm -o - -triple aarch64-none-linux-gnueabi %s | FileCheck %s --check-prefix=NOTNATIVE --check-prefix=CHECK
// RUN: %clang_cc1 -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -emit-llvm -o - -triple x86_64-linux-gnu %s | FileCheck %s --check-prefix=NOTNATIVE --check-prefix=CHECK
// RUN: %clang_cc1 -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -emit-llvm -o - -triple arm-none-linux-gnueabi -fallow-half-arguments-and-returns %s | FileCheck %s --check-prefix=NOTNATIVE --check-prefix=CHECK
// RUN: %clang_cc1 -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -emit-llvm -o - -triple aarch64-none-linux-gnueabi -fallow-half-arguments-and-returns %s | FileCheck %s --check-prefix=NOTNATIVE --check-prefix=CHECK
// RUN: %clang_cc1 -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -emit-llvm -o - -triple arm-none-linux-gnueabi -fnative-half-type %s \
// RUN: | FileCheck %s --check-prefix=NATIVE-HALF --check-prefix=CHECK
// RUN: %clang_cc1 -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -emit-llvm -o - -triple aarch64-none-linux-gnueabi -fnative-half-type %s \
// RUN: | FileCheck %s --check-prefix=NATIVE-HALF --check-prefix=CHECK
//
// Test that the constrained intrinsics are picking up the exception
// metadata from the AST instead of the global default from the command line.
// FIXME: All cases of "fpexcept.maytrap" in this test are wrong.
#pragma float_control(except, on)
typedef unsigned cond_t;
typedef __fp16 float16_t;
volatile cond_t test;
volatile int i0;
volatile __fp16 h0 = 0.0, h1 = 1.0, h2;
volatile float f0, f1, f2;
volatile double d0;
short s0;
void foo(void) {
// CHECK-LABEL: define void @foo()
// Check unary ops
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %{{.*}}, metadata !"fpexcept.strict")
// NATIVE-HALF: call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h0);
// NATIVE-HALF: call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h0 = (test);
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half 0xH0000, metadata !"une", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float 0.000000e+00, metadata !"une", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (!h1);
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: fneg float
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: fneg half
// CHECK: store {{.*}} half {{.*}}, half*
h1 = -h1;
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: load volatile half
// NATIVE-HALF-NEXT: store volatile half
// NOTNATIVE: store {{.*}} half {{.*}}, half*
h1 = +h1;
// NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half 0xH3C00, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
h1++;
// NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half 0xH3C00, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
++h1;
// NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half 0xHBC00, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
--h1;
// NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half 0xHBC00, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
h1--;
// Check binary ops with various operands
// NATIVE-HALF: call half @llvm.experimental.constrained.fmul.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = h0 * h2;
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float -2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fmul.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = h0 * (__fp16) -2.0f;
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = h0 * f2;
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = f0 * h2;
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fmul.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = h0 * i0;
// NATIVE-HALF: call half @llvm.experimental.constrained.fdiv.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = (h0 / h2);
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float -2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fdiv.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = (h0 / (__fp16) -2.0f);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = (h0 / f2);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = (f0 / h2);
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fdiv.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = (h0 / i0);
// NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = (h2 + h0);
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f64(double -2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = ((__fp16)-2.0 + h0);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = (h2 + f0);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = (f2 + h0);
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = (h0 + i0);
// NATIVE-HALF: call half @llvm.experimental.constrained.fsub.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = (h2 - h0);
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float -2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fsub.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = ((__fp16)-2.0f - h0);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = (h2 - f0);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = (f2 - h0);
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fsub.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = (h0 - i0);
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h2 < h0);
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 4.200000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 4.200000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h2 < (__fp16)42.0);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h2 < f0);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (f2 < h0);
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (i0 < h0);
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h0 < i0);
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h0 > h2);
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 4.200000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 4.200000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = ((__fp16)42.0 > h2);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h0 > f2);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (f0 > h2);
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (i0 > h0);
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h0 > i0);
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h2 <= h0);
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 4.200000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 4.200000e+01, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h2 <= (__fp16)42.0);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h2 <= f0);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (f2 <= h0);
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (i0 <= h0);
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h0 <= i0);
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h0 >= h2);
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f64(double -2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h0 >= (__fp16)-2.0);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h0 >= f2);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (f0 >= h2);
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (i0 >= h0);
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmps.f16(half %{{.*}}, half %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"oge", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h0 >= i0);
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h1 == h2);
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h1 == (__fp16)1.0);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h1 == f1);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (f1 == h1);
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (i0 == h0);
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h0 == i0);
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h1 != h2);
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h1 != (__fp16)1.0);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h1 != f1);
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (f1 != h1);
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (i0 != h0);
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
test = (h0 != i0);
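// In the ternary below the condition is itself a float-to-bool
// conversion, so it shows up as a constrained fcmp against zero rather
// than an ordinary fcmp.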
// NATIVE-HALF: call i1 @llvm.experimental.constrained.fcmp.f16(half %{{.*}}, half 0xH0000, metadata !"une", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float {{.*}}, metadata !"une", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h1 = (h1 ? h2 : h0);
// Check assignments (inc. compound)
// CHECK: store {{.*}} half {{.*}}, half*
// xATIVE-HALF: store {{.*}} half 0xHC000 // FIXME: We should be folding here; the check prefix is deliberately disabled until we do.
h0 = h1;
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float -2.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h0 = (__fp16)-2.0f;
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h0 = f0;
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h0 = i0;
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %{{.*}}, metadata !"fpexcept.strict")
// NATIVE-HALF: call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: store {{.*}} i32 {{.*}}, i32*
i0 = h0;
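// Note: the compound assignments below intentionally mix exception
// metadata: the arithmetic picks up the strict setting in effect here,
// while some of the surrounding conversions still carry the global
// "fpexcept.maytrap" default. That mismatch is exactly what these checks
// are meant to surface.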
// NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
h0 += h1;
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fptrunc.f16.f32(float 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h0 += (__fp16)1.0f;
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// CHECK: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
h0 += f2;
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %{{.*}}, metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} i32 {{.*}}, i32*
i0 += h0;
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fadd.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fadd.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
h0 += i0;
// NATIVE-HALF: call half @llvm.experimental.constrained.fsub.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
h0 -= h1;
// NATIVE-HALF: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fsub.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
h0 -= (__fp16)1.0;
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// CHECK: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
h0 -= f2;
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NATIVE-HALF: call half @llvm.experimental.constrained.fsub.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %{{.*}}, metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} i32 {{.*}}, i32*
i0 -= h0;
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fsub.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fsub.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
h0 -= i0;
// NATIVE-HALF: call half @llvm.experimental.constrained.fmul.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
h0 *= h1;
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fmul.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
h0 *= (__fp16)1.0;
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// CHECK: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
h0 *= f2;
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NATIVE-HALF: call half @llvm.experimental.constrained.fmul.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %{{.*}}, metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} i32 {{.*}}, i32*
i0 *= h0;
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fmul.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fmul.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
h0 *= i0;
// NATIVE-HALF: call half @llvm.experimental.constrained.fdiv.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
h0 /= h1;
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fptrunc.f16.f64(double 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NATIVE-HALF: call half @llvm.experimental.constrained.fdiv.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h0 /= (__fp16)1.0;
// CHECK: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// CHECK: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
h0 /= f2;
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NATIVE-HALF: call half @llvm.experimental.constrained.fdiv.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %{{.*}}, metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} i32 {{.*}}, i32*
i0 /= h0;
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NATIVE-HALF: call half @llvm.experimental.constrained.fdiv.f16(half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.maytrap")
// NOTNATIVE: call float @llvm.experimental.constrained.fdiv.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
// CHECK: store {{.*}} half {{.*}}, half*
h0 /= i0;
// Check conversions to/from double
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h0 = d0;
// CHECK: call float @llvm.experimental.constrained.fptrunc.f32.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h0 = (float)d0;
// CHECK: call double @llvm.experimental.constrained.fpext.f64.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: store {{.*}} double {{.*}}, double*
d0 = h0;
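// Casting through float first forces a two-step extension (f16 -> f32 ->
// f64) instead of the single fpext.f64.f16 used for the direct assignment
// above.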
// CHECK: [[MID:%.*]] = call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call double @llvm.experimental.constrained.fpext.f64.f32(float [[MID]], metadata !"fpexcept.strict")
// CHECK: store {{.*}} double {{.*}}, double*
d0 = (float)h0;
// NATIVE-HALF: call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call half @llvm.experimental.constrained.fptrunc.f16.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: store {{.*}} half {{.*}}, half*
h0 = s0;
}
// CHECK-LABEL: define void @testTypeDef(
// NATIVE-HALF: call <4 x half> @llvm.experimental.constrained.fadd.v4f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: %[[CONV:.*]] = call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: %[[CONV1:.*]] = call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict")
// NOTNATIVE: %[[ADD:.*]] = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float> %[[CONV]], <4 x float> %[[CONV1]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// NOTNATIVE: call <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f32(<4 x float> %[[ADD]], metadata !"round.tonearest", metadata !"fpexcept.strict")
void testTypeDef() {
__fp16 t0 __attribute__((vector_size(8)));
float16_t t1 __attribute__((vector_size(8)));
t1 = t0 + t1;
}

View File

@ -266,11 +266,19 @@ public:
/// Set the exception handling to be used with constrained floating point.
void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
#ifndef NDEBUG
Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(NewExcept);
assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
#endif
DefaultConstrainedExcept = NewExcept;
}
/// Set the rounding mode handling to be used with constrained floating point.
void setDefaultConstrainedRounding(RoundingMode NewRounding) {
#ifndef NDEBUG
Optional<StringRef> RoundingStr = RoundingModeToStr(NewRounding);
assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
#endif
DefaultConstrainedRounding = NewRounding;
}
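
Below is a minimal sketch (not part of this commit) of how a frontend can
drive these setters; it assumes the LLVM C++ API of this era
(llvm/IR/IRBuilder.h, llvm/IR/FPEnv.h), and the module boilerplate is
made up for illustration:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Build float f(float a, float b) { return a + b; } in strictfp mode.
int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  auto *FloatTy = Type::getFloatTy(Ctx);
  auto *FT = FunctionType::get(FloatTy, {FloatTy, FloatTy}, /*isVarArg=*/false);
  Function *F = Function::Create(FT, Function::ExternalLinkage, "f", M);
  F->addFnAttr(Attribute::StrictFP);

  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));
  // With the builder in constrained mode, these defaults become the
  // metadata operands on every constrained intrinsic it emits; the new
  // asserts above reject values that have no string form.
  B.setIsFPConstrained(true);
  B.setDefaultConstrainedExcept(fp::ebStrict);
  B.setDefaultConstrainedRounding(RoundingMode::NearestTiesToEven);

  // Emits @llvm.experimental.constrained.fadd.f32 with
  // !"round.tonearest" and !"fpexcept.strict".
  Value *Sum = B.CreateFAdd(F->getArg(0), F->getArg(1));
  B.CreateRet(Sum);
  M.print(outs(), nullptr);
  return 0;
}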