Revert "Introduce intrinsic llvm.isnan"

This reverts commit 16ff91ebcc.
Several errors were reported, mainly related to test-suite execution time. Reverted
for investigation.
This commit is contained in:
Serge Pavlov 2021-08-04 17:18:15 +07:00
parent fc8dee1ebb
commit 0c28a7c990
24 changed files with 145 additions and 2597 deletions

View File

@ -3068,17 +3068,37 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// ZExt bool to int type.
return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
}
case Builtin::BI__builtin_isnan: {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
Value *V = EmitScalarExpr(E->getArg(0));
llvm::Type *Ty = V->getType();
const llvm::fltSemantics &Semantics = Ty->getFltSemantics();
if (!Builder.getIsFPConstrained() ||
Builder.getDefaultConstrainedExcept() == fp::ebIgnore ||
!Ty->isIEEE()) {
V = Builder.CreateFCmpUNO(V, V, "cmp");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM))
return RValue::get(Result);
Function *F = CGM.getIntrinsic(Intrinsic::isnan, V->getType());
Value *Call = Builder.CreateCall(F, V);
return RValue::get(Builder.CreateZExt(Call, ConvertType(E->getType())));
// NaN has all exp bits set and a non zero significand. Therefore:
// isnan(V) == ((exp mask - (abs(V) & exp mask)) < 0)
unsigned bitsize = Ty->getScalarSizeInBits();
llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize);
Value *IntV = Builder.CreateBitCast(V, IntTy);
APInt AndMask = APInt::getSignedMaxValue(bitsize);
Value *AbsV =
Builder.CreateAnd(IntV, llvm::ConstantInt::get(IntTy, AndMask));
APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
Value *Sub =
Builder.CreateSub(llvm::ConstantInt::get(IntTy, ExpMask), AbsV);
// V = sign bit (Sub) <=> V = (Sub < 0)
V = Builder.CreateLShr(Sub, llvm::ConstantInt::get(IntTy, bitsize - 1));
if (bitsize > 32)
V = Builder.CreateTrunc(V, ConvertType(E->getType()));
return RValue::get(V);
}
case Builtin::BI__builtin_matrix_transpose: {

View File

@ -17,7 +17,7 @@ int printf(const char *, ...);
// CHECK-NEXT: store i32 [[X:%.*]], i32* [[X_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load i8*, i8** [[STR_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[X_ADDR]], align 4
// CHECK-NEXT: [[CALL:%.*]] = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i8* [[TMP0]], i32 [[TMP1]]) #[[ATTR3:[0-9]+]]
// CHECK-NEXT: [[CALL:%.*]] = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i8* [[TMP0]], i32 [[TMP1]]) [[ATTR4:#.*]]
// CHECK-NEXT: ret void
//
void p(char *str, int x) {
@ -29,13 +29,13 @@ void p(char *str, int x) {
// CHECK-LABEL: @test_long_double_isinf(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
// CHECK-NEXT: store x86_fp80 [[LD:%.*]], x86_fp80* [[LD_ADDR]], align 16
// CHECK-NEXT: store x86_fp80 [[D:%.*]], x86_fp80* [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load x86_fp80, x86_fp80* [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP1:%.*]] = bitcast x86_fp80 [[TMP0]] to i80
// CHECK-NEXT: [[TMP2:%.*]] = shl i80 [[TMP1]], 1
// CHECK-NEXT: [[TMP3:%.*]] = icmp eq i80 [[TMP2]], -18446744073709551616
// CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.1, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR3]]
// CHECK-NEXT: [[BITCAST:%.*]] = bitcast x86_fp80 [[TMP0]] to i80
// CHECK-NEXT: [[SHL1:%.*]] = shl i80 [[BITCAST]], 1
// CHECK-NEXT: [[CMP:%.*]] = icmp eq i80 [[SHL1]], -18446744073709551616
// CHECK-NEXT: [[RES:%.*]] = zext i1 [[CMP]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.[[#STRID:1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
// CHECK-NEXT: ret void
//
void test_long_double_isinf(long double ld) {
@ -47,13 +47,13 @@ void test_long_double_isinf(long double ld) {
// CHECK-LABEL: @test_long_double_isfinite(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
// CHECK-NEXT: store x86_fp80 [[LD:%.*]], x86_fp80* [[LD_ADDR]], align 16
// CHECK-NEXT: store x86_fp80 [[D:%.*]], x86_fp80* [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load x86_fp80, x86_fp80* [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP1:%.*]] = bitcast x86_fp80 [[TMP0]] to i80
// CHECK-NEXT: [[TMP2:%.*]] = shl i80 [[TMP1]], 1
// CHECK-NEXT: [[TMP3:%.*]] = icmp ult i80 [[TMP2]], -18446744073709551616
// CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str.2, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR3]]
// CHECK-NEXT: [[BITCAST:%.*]] = bitcast x86_fp80 [[TMP0]] to i80
// CHECK-NEXT: [[SHL1:%.*]] = shl i80 [[BITCAST]], 1
// CHECK-NEXT: [[CMP:%.*]] = icmp ult i80 [[SHL1]], -18446744073709551616
// CHECK-NEXT: [[RES:%.*]] = zext i1 [[CMP]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
// CHECK-NEXT: ret void
//
void test_long_double_isfinite(long double ld) {
@ -65,11 +65,14 @@ void test_long_double_isfinite(long double ld) {
// CHECK-LABEL: @test_long_double_isnan(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
// CHECK-NEXT: store x86_fp80 [[LD:%.*]], x86_fp80* [[LD_ADDR]], align 16
// CHECK-NEXT: store x86_fp80 [[D:%.*]], x86_fp80* [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load x86_fp80, x86_fp80* [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.isnan.f80(x86_fp80 [[TMP0]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.3, i64 0, i64 0), i32 [[TMP2]]) #[[ATTR3]]
// CHECK-NEXT: [[BITCAST:%.*]] = bitcast x86_fp80 [[TMP0]] to i80
// CHECK-NEXT: [[ABS:%.*]] = and i80 [[BITCAST]], 604462909807314587353087
// CHECK-NEXT: [[TMP1:%.*]] = sub i80 604453686435277732577280, [[ABS]]
// CHECK-NEXT: [[ISNAN:%.*]] = lshr i80 [[TMP1]], 79
// CHECK-NEXT: [[RES:%.*]] = trunc i80 [[ISNAN]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
// CHECK-NEXT: ret void
//
void test_long_double_isnan(long double ld) {

View File

@ -1,4 +1,3 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 %s -emit-llvm -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -o - -triple arm64-none-linux-gnu | FileCheck %s
// Test that the constrained intrinsics are picking up the exception
@ -16,7 +15,7 @@ int printf(const char *, ...);
// CHECK-NEXT: store i32 [[X:%.*]], i32* [[X_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load i8*, i8** [[STR_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[X_ADDR]], align 4
// CHECK-NEXT: [[CALL:%.*]] = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i8* [[TMP0]], i32 [[TMP1]]) #[[ATTR3:[0-9]+]]
// CHECK-NEXT: [[CALL:%.*]] = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i8* [[TMP0]], i32 [[TMP1]]) [[ATTR4:#.*]]
// CHECK-NEXT: ret void
//
void p(char *str, int x) {
@ -28,13 +27,13 @@ void p(char *str, int x) {
// CHECK-LABEL: @test_long_double_isinf(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca fp128, align 16
// CHECK-NEXT: store fp128 [[LD:%.*]], fp128* [[LD_ADDR]], align 16
// CHECK-NEXT: store fp128 [[D:%.*]], fp128* [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load fp128, fp128* [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP1:%.*]] = bitcast fp128 [[TMP0]] to i128
// CHECK-NEXT: [[TMP2:%.*]] = shl i128 [[TMP1]], 1
// CHECK-NEXT: [[TMP3:%.*]] = icmp eq i128 [[TMP2]], -10384593717069655257060992658440192
// CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.1, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR3]]
// CHECK-NEXT: [[BITCAST:%.*]] = bitcast fp128 [[TMP0]] to i128
// CHECK-NEXT: [[SHL1:%.*]] = shl i128 [[BITCAST]], 1
// CHECK-NEXT: [[CMP:%.*]] = icmp eq i128 [[SHL1]], -10384593717069655257060992658440192
// CHECK-NEXT: [[RES:%.*]] = zext i1 [[CMP]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.[[#STRID:1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
// CHECK-NEXT: ret void
//
void test_long_double_isinf(long double ld) {
@ -46,13 +45,13 @@ void test_long_double_isinf(long double ld) {
// CHECK-LABEL: @test_long_double_isfinite(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca fp128, align 16
// CHECK-NEXT: store fp128 [[LD:%.*]], fp128* [[LD_ADDR]], align 16
// CHECK-NEXT: store fp128 [[D:%.*]], fp128* [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load fp128, fp128* [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP1:%.*]] = bitcast fp128 [[TMP0]] to i128
// CHECK-NEXT: [[TMP2:%.*]] = shl i128 [[TMP1]], 1
// CHECK-NEXT: [[TMP3:%.*]] = icmp ult i128 [[TMP2]], -10384593717069655257060992658440192
// CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str.2, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR3]]
// CHECK-NEXT: [[BITCAST:%.*]] = bitcast fp128 [[TMP0]] to i128
// CHECK-NEXT: [[SHL1:%.*]] = shl i128 [[BITCAST]], 1
// CHECK-NEXT: [[CMP:%.*]] = icmp ult i128 [[SHL1]], -10384593717069655257060992658440192
// CHECK-NEXT: [[RES:%.*]] = zext i1 [[CMP]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
// CHECK-NEXT: ret void
//
void test_long_double_isfinite(long double ld) {
@ -64,11 +63,14 @@ void test_long_double_isfinite(long double ld) {
// CHECK-LABEL: @test_long_double_isnan(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca fp128, align 16
// CHECK-NEXT: store fp128 [[LD:%.*]], fp128* [[LD_ADDR]], align 16
// CHECK-NEXT: store fp128 [[D:%.*]], fp128* [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load fp128, fp128* [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.isnan.f128(fp128 [[TMP0]]) #[[ATTR3]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.3, i64 0, i64 0), i32 [[TMP2]]) #[[ATTR3]]
// CHECK-NEXT: [[BITCAST:%.*]] = bitcast fp128 [[TMP0]] to i128
// CHECK-NEXT: [[ABS:%.*]] = and i128 [[BITCAST]], 170141183460469231731687303715884105727
// CHECK-NEXT: [[TMP1:%.*]] = sub i128 170135991163610696904058773219554885632, [[ABS]]
// CHECK-NEXT: [[ISNAN:%.*]] = lshr i128 [[TMP1]], 127
// CHECK-NEXT: [[RES:%.*]] = trunc i128 [[ISNAN]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]])
// CHECK-NEXT: ret void
//
void test_long_double_isnan(long double ld) {

View File

@ -17,7 +17,7 @@ int printf(const char *, ...);
// CHECK-NEXT: store i32 [[X:%.*]], i32* [[X_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load i8*, i8** [[STR_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[X_ADDR]], align 4
// CHECK-NEXT: [[CALL:%.*]] = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i8* [[TMP0]], i32 [[TMP1]]) #[[ATTR5:[0-9]+]]
// CHECK-NEXT: [[CALL:%.*]] = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i8* [[TMP0]], i32 [[TMP1]]) [[ATTR4:#.*]]
// CHECK-NEXT: ret void
//
void p(char *str, int x) {
@ -31,21 +31,21 @@ void p(char *str, int x) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], double* [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, double* [[D_ADDR]], align 8
// CHECK-NEXT: [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]]
// CHECK-NEXT: [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") [[ATTR4]]
// CHECK-NEXT: br i1 [[ISZERO]], label [[FPCLASSIFY_END:%.*]], label [[FPCLASSIFY_NOT_ZERO:%.*]]
// CHECK: fpclassify_end:
// CHECK-NEXT: [[FPCLASSIFY_RESULT:%.*]] = phi i32 [ 4, [[ENTRY:%.*]] ], [ 0, [[FPCLASSIFY_NOT_ZERO]] ], [ 1, [[FPCLASSIFY_NOT_NAN:%.*]] ], [ [[TMP2:%.*]], [[FPCLASSIFY_NOT_INF:%.*]] ]
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @.str.1, i64 0, i64 0), i32 [[FPCLASSIFY_RESULT]]) #[[ATTR5]]
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @.str.1, i64 0, i64 0), i32 [[FPCLASSIFY_RESULT]]) [[ATTR4]]
// CHECK-NEXT: ret void
// CHECK: fpclassify_not_zero:
// CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") #[[ATTR5]]
// CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") [[ATTR4]]
// CHECK-NEXT: br i1 [[CMP]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_NAN]]
// CHECK: fpclassify_not_nan:
// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6:[0-9]+]]
// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]]
// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) [[ATTR5:#.*]]
// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") [[ATTR4]]
// CHECK-NEXT: br i1 [[ISINF]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_INF]]
// CHECK: fpclassify_not_inf:
// CHECK-NEXT: [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR5]]
// CHECK-NEXT: [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") [[ATTR4]]
// CHECK-NEXT: [[TMP2]] = select i1 [[ISNORMAL]], i32 2, i32 3
// CHECK-NEXT: br label [[FPCLASSIFY_END]]
//
@ -57,14 +57,14 @@ void test_fpclassify(double d) {
// CHECK-LABEL: @test_fp16_isinf(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[H:%.*]], half* [[H_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, half* [[H_ADDR]], align 2
// CHECK-NEXT: [[TMP1:%.*]] = bitcast half [[TMP0]] to i16
// CHECK-NEXT: [[TMP2:%.*]] = shl i16 [[TMP1]], 1
// CHECK-NEXT: [[TMP3:%.*]] = icmp eq i16 [[TMP2]], -2048
// CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.2, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR5]]
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[H:%.*]], half* [[LD_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, half* [[LD_ADDR]], align 2
// CHECK-NEXT: [[BITCAST:%.*]] = bitcast half [[TMP0]] to i16
// CHECK-NEXT: [[SHL1:%.*]] = shl i16 [[BITCAST]], 1
// CHECK-NEXT: [[CMP:%.*]] = icmp eq i16 [[SHL1]], -2048
// CHECK-NEXT: [[RES:%.*]] = zext i1 [[CMP]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.[[#STRID:2]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
// CHECK-NEXT: ret void
//
void test_fp16_isinf(__fp16 h) {
@ -75,14 +75,14 @@ void test_fp16_isinf(__fp16 h) {
// CHECK-LABEL: @test_float_isinf(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT: store float [[F:%.*]], float* [[F_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[F_ADDR]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = bitcast float [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[TMP1]], 1
// CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[TMP2]], -16777216
// CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.3, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR5]]
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT: store float [[F:%.*]], float* [[LD_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[LD_ADDR]], align 4
// CHECK-NEXT: [[BITCAST:%.*]] = bitcast float [[TMP0]] to i32
// CHECK-NEXT: [[SHL1:%.*]] = shl i32 [[BITCAST]], 1
// CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[SHL1]], -16777216
// CHECK-NEXT: [[RES:%.*]] = zext i1 [[CMP]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
// CHECK-NEXT: ret void
//
void test_float_isinf(float f) {
@ -93,14 +93,14 @@ void test_float_isinf(float f) {
// CHECK-LABEL: @test_double_isinf(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], double* [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, double* [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = bitcast double [[TMP0]] to i64
// CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[TMP1]], 1
// CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[TMP2]], -9007199254740992
// CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.4, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR5]]
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], double* [[LD_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, double* [[LD_ADDR]], align 8
// CHECK-NEXT: [[BITCAST:%.*]] = bitcast double [[TMP0]] to i64
// CHECK-NEXT: [[SHL1:%.*]] = shl i64 [[BITCAST]], 1
// CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[SHL1]], -9007199254740992
// CHECK-NEXT: [[RES:%.*]] = zext i1 [[CMP]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
// CHECK-NEXT: ret void
//
void test_double_isinf(double d) {
@ -111,14 +111,14 @@ void test_double_isinf(double d) {
// CHECK-LABEL: @test_fp16_isfinite(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[H:%.*]], half* [[H_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, half* [[H_ADDR]], align 2
// CHECK-NEXT: [[TMP1:%.*]] = bitcast half [[TMP0]] to i16
// CHECK-NEXT: [[TMP2:%.*]] = shl i16 [[TMP1]], 1
// CHECK-NEXT: [[TMP3:%.*]] = icmp ult i16 [[TMP2]], -2048
// CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.5, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR5]]
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[H:%.*]], half* [[LD_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, half* [[LD_ADDR]], align 2
// CHECK-NEXT: [[BITCAST:%.*]] = bitcast half [[TMP0]] to i16
// CHECK-NEXT: [[SHL1:%.*]] = shl i16 [[BITCAST]], 1
// CHECK-NEXT: [[CMP:%.*]] = icmp ult i16 [[SHL1]], -2048
// CHECK-NEXT: [[RES:%.*]] = zext i1 [[CMP]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
// CHECK-NEXT: ret void
//
void test_fp16_isfinite(__fp16 h) {
@ -129,14 +129,14 @@ void test_fp16_isfinite(__fp16 h) {
// CHECK-LABEL: @test_float_isfinite(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT: store float [[F:%.*]], float* [[F_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[F_ADDR]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = bitcast float [[TMP0]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[TMP1]], 1
// CHECK-NEXT: [[TMP3:%.*]] = icmp ult i32 [[TMP2]], -16777216
// CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.6, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR5]]
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT: store float [[F:%.*]], float* [[LD_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[LD_ADDR]], align 4
// CHECK-NEXT: [[BITCAST:%.*]] = bitcast float [[TMP0]] to i32
// CHECK-NEXT: [[SHL1:%.*]] = shl i32 [[BITCAST]], 1
// CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[SHL1]], -16777216
// CHECK-NEXT: [[RES:%.*]] = zext i1 [[CMP]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
// CHECK-NEXT: ret void
//
void test_float_isfinite(float f) {
@ -147,14 +147,14 @@ void test_float_isfinite(float f) {
// CHECK-LABEL: @test_double_isfinite(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], double* [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, double* [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = bitcast double [[TMP0]] to i64
// CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[TMP1]], 1
// CHECK-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP2]], -9007199254740992
// CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.7, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR5]]
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], double* [[LD_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, double* [[LD_ADDR]], align 8
// CHECK-NEXT: [[BITCAST:%.*]] = bitcast double [[TMP0]] to i64
// CHECK-NEXT: [[SHL1:%.*]] = shl i64 [[BITCAST]], 1
// CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[SHL1]], -9007199254740992
// CHECK-NEXT: [[RES:%.*]] = zext i1 [[CMP]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
// CHECK-NEXT: ret void
//
void test_double_isfinite(double d) {
@ -168,13 +168,13 @@ void test_double_isfinite(double d) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], double* [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, double* [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6]]
// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]]
// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) [[ATTR5]]
// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") [[ATTR4]]
// CHECK-NEXT: [[TMP2:%.*]] = bitcast double [[TMP0]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = icmp slt i64 [[TMP2]], 0
// CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 -1, i32 1
// CHECK-NEXT: [[TMP5:%.*]] = select i1 [[ISINF]], i32 [[TMP4]], i32 0
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.8, i64 0, i64 0), i32 [[TMP5]]) #[[ATTR5]]
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[TMP5]]) [[ATTR4]]
// CHECK-NEXT: ret void
//
void test_isinf_sign(double d) {
@ -188,9 +188,12 @@ void test_isinf_sign(double d) {
// CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[H:%.*]], half* [[H_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, half* [[H_ADDR]], align 2
// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.isnan.f16(half [[TMP0]]) #[[ATTR5]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.9, i64 0, i64 0), i32 [[TMP2]]) #[[ATTR5]]
// CHECK-NEXT: [[BITCAST:%.*]] = bitcast half [[TMP0]] to i16
// CHECK-NEXT: [[ABS:%.*]] = and i16 [[BITCAST]], [[#%u,0x7FFF]]
// CHECK-NEXT: [[TMP1:%.*]] = sub i16 [[#%u,0x7C00]], [[ABS]]
// CHECK-NEXT: [[ISNAN:%.*]] = lshr i16 [[TMP1]], 15
// CHECK-NEXT: [[RES:%.*]] = zext i16 [[ISNAN]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
// CHECK-NEXT: ret void
//
void test_fp16_isnan(__fp16 h) {
@ -204,9 +207,11 @@ void test_fp16_isnan(__fp16 h) {
// CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT: store float [[F:%.*]], float* [[F_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[F_ADDR]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.isnan.f32(float [[TMP0]]) #[[ATTR5]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.10, i64 0, i64 0), i32 [[TMP2]]) #[[ATTR5]]
// CHECK-NEXT: [[BITCAST:%.*]] = bitcast float [[TMP0]] to i32
// CHECK-NEXT: [[ABS:%.*]] = and i32 [[BITCAST]], [[#%u,0x7FFFFFFF]]
// CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[#%u,0x7F800000]], [[ABS]]
// CHECK-NEXT: [[ISNAN:%.*]] = lshr i32 [[TMP1]], 31
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[ISNAN]]) [[ATTR4]]
// CHECK-NEXT: ret void
//
void test_float_isnan(float f) {
@ -220,9 +225,12 @@ void test_float_isnan(float f) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], double* [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, double* [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.isnan.f64(double [[TMP0]]) #[[ATTR5]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.11, i64 0, i64 0), i32 [[TMP2]]) #[[ATTR5]]
// CHECK-NEXT: [[BITCAST:%.*]] = bitcast double [[TMP0]] to i64
// CHECK-NEXT: [[ABS:%.*]] = and i64 [[BITCAST]], [[#%u,0x7FFFFFFFFFFFFFFF]]
// CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[#%u,0x7FF0000000000000]], [[ABS]]
// CHECK-NEXT: [[ISNAN:%.*]] = lshr i64 [[TMP1]], 63
// CHECK-NEXT: [[RES:%.*]] = trunc i64 [[ISNAN]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
// CHECK-NEXT: ret void
//
void test_double_isnan(double d) {
@ -236,14 +244,14 @@ void test_double_isnan(double d) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], double* [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, double* [[D_ADDR]], align 8
// CHECK-NEXT: [[ISEQ:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]]
// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6]]
// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"ult", metadata !"fpexcept.strict") #[[ATTR5]]
// CHECK-NEXT: [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR5]]
// CHECK-NEXT: [[ISEQ:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"oeq", metadata !"fpexcept.strict") [[ATTR4]]
// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) [[ATTR5]]
// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"ult", metadata !"fpexcept.strict") [[ATTR4]]
// CHECK-NEXT: [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") [[ATTR4]]
// CHECK-NEXT: [[AND:%.*]] = and i1 [[ISEQ]], [[ISINF]]
// CHECK-NEXT: [[AND1:%.*]] = and i1 [[AND]], [[ISNORMAL]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[AND1]] to i32
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.12, i64 0, i64 0), i32 [[TMP2]]) #[[ATTR5]]
// CHECK-NEXT: call void @p(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[TMP2]]) [[ATTR4]]
// CHECK-NEXT: ret void
//
void test_isnormal(double d) {

View File

@ -20985,52 +20985,6 @@ return any value and uses platform-independent representation of IEEE rounding
modes.
Floating Point Test Intrinsics
------------------------------
These functions query properties of floating-point values.
'``llvm.isnan``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
::
declare i1 @llvm.isnan(<fptype> <op>)
declare <N x i1> @llvm.isnan(<vector-fptype> <op>)
Overview:
"""""""""
The '``llvm.isnan``' intrinsic returns a boolean value or vector of boolean
values depending on whether the value is NaN.
If the operand is a floating-point scalar, then the result type is a
boolean (:ref:`i1 <t_integer>`).
If the operand is a floating-point vector, then the result type is a
vector of boolean with the same number of elements as the operand.
Arguments:
""""""""""
The argument to the '``llvm.isnan``' intrinsic must be
:ref:`floating-point <t_floating>` or :ref:`vector <t_vector>`
of floating-point values.
Semantics:
""""""""""
The function tests if ``op`` is NaN. If ``op`` is a vector, then the
check is made element by element. Each test yields an :ref:`i1 <t_integer>`
result, which is ``true`` if the value is NaN. The function never raises
floating-point exceptions.
General Intrinsics
------------------

View File

@ -482,10 +482,6 @@ enum NodeType {
/// Returns platform specific canonical encoding of a floating point number.
FCANONICALIZE,
/// Performs check of floating point number property, defined by IEEE-754. The
/// only operand is the floating point value to check. Returns boolean value.
ISNAN,
/// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector
/// with the specified, possibly variable, elements. The types of the
/// operands must match the vector element type, except that integer types

View File

@ -4426,10 +4426,6 @@ public:
/// \returns The expansion result
SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const;
/// Expand isnan depending on function attributes.
SDValue expandISNAN(EVT ResultVT, SDValue Op, SDNodeFlags Flags,
const SDLoc &DL, SelectionDAG &DAG) const;
/// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes,
/// vector nodes can only succeed if all operations are legal/custom.
/// \param N Node to expand

View File

@ -715,14 +715,6 @@ let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn] in {
def int_set_rounding : DefaultAttrsIntrinsic<[], [llvm_i32_ty]>;
}
//===--------------- Floating Point Test Intrinsics -----------------------===//
//
def int_isnan
: DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
[llvm_anyfloat_ty],
[IntrNoMem, IntrWillReturn]>;
//===--------------- Constrained Floating Point Intrinsics ----------------===//
//

View File

@ -1579,10 +1579,9 @@ bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
return !Call->isStrictFP();
// Sign operations are actually bitwise operations, they do not raise
// exceptions even for SNANs. The same applies to classification functions.
// exceptions even for SNANs.
case Intrinsic::fabs:
case Intrinsic::copysign:
case Intrinsic::isnan:
// Non-constrained variants of rounding operations means default FP
// environment, they can be folded in any case.
case Intrinsic::ceil:
@ -2003,9 +2002,6 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
return ConstantInt::get(Ty, Int);
}
if (IntrinsicID == Intrinsic::isnan)
return ConstantInt::get(Ty, U.isNaN());
if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
return nullptr;

View File

@ -1184,10 +1184,6 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
Action = TLI.getOperationAction(
Node->getOpcode(), Node->getOperand(1).getValueType());
break;
case ISD::ISNAN:
Action = TLI.getOperationAction(Node->getOpcode(),
Node->getOperand(0).getValueType());
break;
default:
if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
Action = TargetLowering::Legal;
@ -3111,12 +3107,6 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
case ISD::FCOPYSIGN:
Results.push_back(ExpandFCOPYSIGN(Node));
break;
case ISD::ISNAN:
if (SDValue Expanded =
TLI.expandISNAN(Node->getValueType(0), Node->getOperand(0),
Node->getFlags(), SDLoc(Node), DAG))
Results.push_back(Expanded);
break;
case ISD::FNEG:
Results.push_back(ExpandFNEG(Node));
break;

View File

@ -139,8 +139,6 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::FLT_ROUNDS_: Res = PromoteIntRes_FLT_ROUNDS(N); break;
case ISD::ISNAN: Res = PromoteIntRes_ISNAN(N); break;
case ISD::AND:
case ISD::OR:
case ISD::XOR:
@ -658,14 +656,6 @@ SDValue DAGTypeLegalizer::PromoteIntRes_FLT_ROUNDS(SDNode *N) {
return Res;
}
/// Promote the illegal integer result type of an ISNAN node: rebuild the node
/// with the promoted result type, keeping the operand and flags unchanged.
SDValue DAGTypeLegalizer::PromoteIntRes_ISNAN(SDNode *N) {
  EVT PromotedVT =
      TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
  return DAG.getNode(N->getOpcode(), SDLoc(N), PromotedVT, N->getOperand(0),
                     N->getFlags());
}
SDValue DAGTypeLegalizer::PromoteIntRes_INT_EXTEND(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDLoc dl(N);

View File

@ -352,7 +352,6 @@ private:
SDValue PromoteIntRes_MULFIX(SDNode *N);
SDValue PromoteIntRes_DIVFIX(SDNode *N);
SDValue PromoteIntRes_FLT_ROUNDS(SDNode *N);
SDValue PromoteIntRes_ISNAN(SDNode *N);
SDValue PromoteIntRes_VECREDUCE(SDNode *N);
SDValue PromoteIntRes_ABS(SDNode *N);
SDValue PromoteIntRes_Rotate(SDNode *N);
@ -776,7 +775,6 @@ private:
SDValue ScalarizeVecRes_UNDEF(SDNode *N);
SDValue ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N);
SDValue ScalarizeVecRes_FP_TO_XINT_SAT(SDNode *N);
SDValue ScalarizeVecRes_ISNAN(SDNode *N);
SDValue ScalarizeVecRes_FIX(SDNode *N);
@ -837,7 +835,6 @@ private:
void SplitVecRes_INSERT_SUBVECTOR(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_FPOWI(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_FCOPYSIGN(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_ISNAN(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo, SDValue &Hi);
void SplitVecRes_MLOAD(MaskedLoadSDNode *MLD, SDValue &Lo, SDValue &Hi);
@ -947,7 +944,6 @@ private:
SDValue WidenVecOp_Convert(SDNode *N);
SDValue WidenVecOp_FP_TO_XINT_SAT(SDNode *N);
SDValue WidenVecOp_FCOPYSIGN(SDNode *N);
SDValue WidenVecOp_ISNAN(SDNode *N);
SDValue WidenVecOp_VECREDUCE(SDNode *N);
SDValue WidenVecOp_VECREDUCE_SEQ(SDNode *N);

View File

@ -64,7 +64,6 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
case ISD::SETCC: R = ScalarizeVecRes_SETCC(N); break;
case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(N); break;
case ISD::VECTOR_SHUFFLE: R = ScalarizeVecRes_VECTOR_SHUFFLE(N); break;
case ISD::ISNAN: R = ScalarizeVecRes_ISNAN(N); break;
case ISD::ANY_EXTEND_VECTOR_INREG:
case ISD::SIGN_EXTEND_VECTOR_INREG:
case ISD::ZERO_EXTEND_VECTOR_INREG:
@ -583,28 +582,6 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_SETCC(SDNode *N) {
return DAG.getNode(ExtendCode, DL, NVT, Res);
}
/// Scalarize an ISNAN whose one-element vector result is legalized to a
/// scalar: compute an i1 ISNAN on the scalar operand, then extend it to the
/// scalar result type using the vector boolean-contents convention.
SDValue DAGTypeLegalizer::ScalarizeVecRes_ISNAN(SDNode *N) {
  SDLoc SL(N);
  SDValue Op = N->getOperand(0);
  EVT OpVT = Op.getValueType();
  EVT EltResVT = N->getValueType(0).getVectorElementType();

  // The operand may or may not itself have been scalarized. If it was not,
  // explicitly pull element zero out of the one-element vector.
  if (getTypeAction(OpVT) == TargetLowering::TypeScalarizeVector)
    Op = GetScalarizedVector(Op);
  else
    Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, OpVT.getVectorElementType(),
                     Op, DAG.getVectorIdxConstant(0, SL));

  SDValue IsNaN = DAG.getNode(ISD::ISNAN, SL, MVT::i1, Op, N->getFlags());

  // Vector boolean contents may differ from scalar ones; extend the i1 bit in
  // whichever way the target's vector boolean contents require.
  ISD::NodeType Ext =
      TargetLowering::getExtendForContent(TLI.getBooleanContents(OpVT));
  return DAG.getNode(Ext, SL, EltResVT, IsNaN);
}
//===----------------------------------------------------------------------===//
// Operand Vector Scalarization <1 x ty> -> ty.
@ -947,7 +924,6 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::INSERT_SUBVECTOR: SplitVecRes_INSERT_SUBVECTOR(N, Lo, Hi); break;
case ISD::FPOWI: SplitVecRes_FPOWI(N, Lo, Hi); break;
case ISD::FCOPYSIGN: SplitVecRes_FCOPYSIGN(N, Lo, Hi); break;
case ISD::ISNAN: SplitVecRes_ISNAN(N, Lo, Hi); break;
case ISD::INSERT_VECTOR_ELT: SplitVecRes_INSERT_VECTOR_ELT(N, Lo, Hi); break;
case ISD::SPLAT_VECTOR:
case ISD::SCALAR_TO_VECTOR:
@ -1384,17 +1360,6 @@ void DAGTypeLegalizer::SplitVecRes_FCOPYSIGN(SDNode *N, SDValue &Lo,
Hi = DAG.getNode(ISD::FCOPYSIGN, DL, LHSHi.getValueType(), LHSHi, RHSHi);
}
/// Split an ISNAN over an illegally wide vector: apply ISNAN independently to
/// the low and high halves of the operand, producing the two result halves.
void DAGTypeLegalizer::SplitVecRes_ISNAN(SDNode *N, SDValue &Lo, SDValue &Hi) {
  SDLoc SL(N);
  EVT LoResVT, HiResVT;
  std::tie(LoResVT, HiResVT) = DAG.GetSplitDestVTs(N->getValueType(0));
  SDValue OpLo, OpHi;
  GetSplitVector(N->getOperand(0), OpLo, OpHi);
  Lo = DAG.getNode(ISD::ISNAN, SL, LoResVT, OpLo, N->getFlags());
  Hi = DAG.getNode(ISD::ISNAN, SL, HiResVT, OpHi, N->getFlags());
}
void DAGTypeLegalizer::SplitVecRes_InregOp(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue LHSLo, LHSHi;
@ -4582,7 +4547,6 @@ bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
case ISD::STRICT_FSETCCS: Res = WidenVecOp_STRICT_FSETCC(N); break;
case ISD::VSELECT: Res = WidenVecOp_VSELECT(N); break;
case ISD::FCOPYSIGN: Res = WidenVecOp_FCOPYSIGN(N); break;
case ISD::ISNAN: Res = WidenVecOp_ISNAN(N); break;
case ISD::ANY_EXTEND:
case ISD::SIGN_EXTEND:
@ -4718,33 +4682,6 @@ SDValue DAGTypeLegalizer::WidenVecOp_FCOPYSIGN(SDNode *N) {
return DAG.UnrollVectorOp(N);
}
/// Handle an ISNAN whose vector operand must be widened. The node is widened
/// like a SETCC: run ISNAN on the widened operand, extract the subvector that
/// corresponds to the original elements, then extend per boolean contents.
SDValue DAGTypeLegalizer::WidenVecOp_ISNAN(SDNode *N) {
SDLoc DL(N);
EVT ResultVT = N->getValueType(0);
SDValue WideArg = GetWidenedVector(N->getOperand(0));
// Process this node similarly to SETCC.
EVT WideResultVT = getSetCCResultType(WideArg.getValueType());
// If the original result element type was i1, keep i1 elements for the wide
// result rather than the (possibly larger) SETCC result element type.
if (ResultVT.getScalarType() == MVT::i1)
WideResultVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
WideResultVT.getVectorNumElements());
SDValue WideNode =
DAG.getNode(ISD::ISNAN, DL, WideResultVT, WideArg, N->getFlags());
// Extract the needed results from the result vector.
// ResVT: the wide result's element type, but only as many elements as the
// original (un-widened) result had.
EVT ResVT =
EVT::getVectorVT(*DAG.getContext(), WideResultVT.getVectorElementType(),
ResultVT.getVectorNumElements());
SDValue CC = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResVT, WideNode,
DAG.getVectorIdxConstant(0, DL));
// Extend the extracted booleans to the requested result type using the
// boolean-contents convention of the operand's (vector) type.
EVT OpVT = N->getOperand(0).getValueType();
ISD::NodeType ExtendCode =
TargetLowering::getExtendForContent(TLI.getBooleanContents(OpVT));
return DAG.getNode(ExtendCode, DL, ResultVT, CC);
}
SDValue DAGTypeLegalizer::WidenVecOp_Convert(SDNode *N) {
// Since the result is legal and the input is illegal.
EVT VT = N->getValueType(0);

View File

@ -6408,30 +6408,6 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
return;
}
case Intrinsic::isnan: {
const DataLayout DLayout = DAG.getDataLayout();
EVT DestVT = TLI.getValueType(DLayout, I.getType());
EVT ArgVT = TLI.getValueType(DLayout, I.getArgOperand(0)->getType());
MachineFunction &MF = DAG.getMachineFunction();
const Function &F = MF.getFunction();
SDValue Op = getValue(I.getArgOperand(0));
SDNodeFlags Flags;
Flags.setNoFPExcept(
!F.getAttributes().hasFnAttribute(llvm::Attribute::StrictFP));
// If ISD::ISNAN should be expanded, do it right now, because the expansion
// can use illegal types. Making expansion early allows to legalize these
// types prior to selection.
if (!TLI.isOperationLegalOrCustom(ISD::ISNAN, ArgVT)) {
SDValue Result = TLI.expandISNAN(DestVT, Op, Flags, sdl, DAG);
setValue(&I, Result);
return;
}
SDValue V = DAG.getNode(ISD::ISNAN, sdl, DestVT, Op, Flags);
setValue(&I, V);
return;
}
case Intrinsic::readcyclecounter: {
SDValue Op = getRoot();
Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,

View File

@ -267,7 +267,6 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::FCOPYSIGN: return "fcopysign";
case ISD::FGETSIGN: return "fgetsign";
case ISD::FCANONICALIZE: return "fcanonicalize";
case ISD::ISNAN: return "isnan";
case ISD::FPOW: return "fpow";
case ISD::STRICT_FPOW: return "strict_fpow";
case ISD::SMIN: return "smin";

View File

@ -6970,35 +6970,6 @@ SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node,
return SDValue();
}
/// Expand an ISNAN of floating-point value \p Op into target-independent
/// nodes, producing a boolean of type \p ResultVT.
/// Two strategies: an unordered FP compare when FP exceptions may be ignored,
/// otherwise an exception-free integer bit test on the value's encoding.
SDValue TargetLowering::expandISNAN(EVT ResultVT, SDValue Op, SDNodeFlags Flags,
const SDLoc &DL, SelectionDAG &DAG) const {
EVT OperandVT = Op.getValueType();
assert(OperandVT.isFloatingPoint());
// If floating point exceptions are ignored, expand to unordered comparison.
// (x SETUO x-or-anything is true exactly when an operand is NaN.)
if (Flags.hasNoFPExcept() &&
isOperationLegalOrCustom(ISD::SETCC, OperandVT.getScalarType()))
return DAG.getSetCC(DL, ResultVT, Op, DAG.getConstantFP(0.0, DL, OperandVT),
ISD::SETUO);
// In general case use integer operations to avoid traps if argument is SNaN.
// NaN has all exp bits set and a non zero significand. Therefore:
// isnan(V) == exp mask < abs(V)
unsigned BitSize = OperandVT.getScalarSizeInBits();
EVT IntVT = OperandVT.changeTypeToInteger();
SDValue ArgV = DAG.getBitcast(IntVT, Op);
// getSignedMaxValue = all bits but the sign bit; the AND computes abs(V)
// on the bit pattern.
APInt AndMask = APInt::getSignedMaxValue(BitSize);
SDValue AndMaskV = DAG.getConstant(AndMask, DL, IntVT);
SDValue AbsV = DAG.getNode(ISD::AND, DL, IntVT, ArgV, AndMaskV);
// Infinity's encoding (exponent all-ones, significand zero) is exactly the
// exponent mask; any abs pattern strictly greater than it is a NaN.
EVT ScalarFloatVT = OperandVT.getScalarType();
const Type *FloatTy = ScalarFloatVT.getTypeForEVT(*DAG.getContext());
const llvm::fltSemantics &Semantics = FloatTy->getFltSemantics();
APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
SDValue ExpMaskV = DAG.getConstant(ExpMask, DL, IntVT);
// Signed SETLT is safe here: AbsV has its sign bit cleared by the mask above.
return DAG.getSetCC(DL, ResultVT, ExpMaskV, AbsV, ISD::SETLT);
}
bool TargetLowering::expandCTPOP(SDNode *Node, SDValue &Result,
SelectionDAG &DAG) const {
SDLoc dl(Node);

View File

@ -760,7 +760,6 @@ void TargetLoweringBase::initActions() {
// These operations default to expand.
setOperationAction(ISD::FGETSIGN, VT, Expand);
setOperationAction(ISD::ISNAN, VT, Expand);
setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
setOperationAction(ISD::FMINNUM, VT, Expand);
setOperationAction(ISD::FMAXNUM, VT, Expand);

View File

@ -712,7 +712,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::LLROUND, MVT::f80, Expand);
setOperationAction(ISD::LRINT, MVT::f80, Custom);
setOperationAction(ISD::LLRINT, MVT::f80, Custom);
setOperationAction(ISD::ISNAN, MVT::f80, Custom);
// Handle constrained floating-point operations of scalar.
setOperationAction(ISD::STRICT_FADD , MVT::f80, Legal);
@ -22156,45 +22155,6 @@ static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
return Res;
}
/// Lower ISNAN on x87 floating-point types (f32/f64/f80) using the FXAM
/// classification instruction: FXAM sets the FPU status-word condition codes
/// C3/C2/C0 according to the operand's class; per the FXAM encoding a NaN
/// yields C3/C2/C0 = 0/0/1, so after masking those three bits the status
/// equals exactly 1 (see Intel SDM, FXAM).
static SDValue lowerISNAN(SDValue Op, SelectionDAG &DAG) {
SDLoc DL(Op);
SDValue Arg = Op.getOperand(0);
MVT ArgVT = Arg.getSimpleValueType();
MVT ResultVT = Op.getSimpleValueType();
// Determine classification of argument using instruction FXAM.
// Select the XAM pseudo matching the operand width.
unsigned Opc;
switch (ArgVT.SimpleTy) {
default:
llvm_unreachable("Unexpected type!");
case MVT::f32:
Opc = X86::XAM_Fp32;
break;
case MVT::f64:
Opc = X86::XAM_Fp64;
break;
case MVT::f80:
Opc = X86::XAM_Fp80;
break;
}
// XAM produces no value; its effect is carried via the glue result.
SDValue Test(DAG.getMachineNode(Opc, DL, MVT::Glue, Arg), 0);
// Move FPSW to AX.
SDValue FNSTSW =
SDValue(DAG.getMachineNode(X86::FNSTSW16r, DL, MVT::i16, Test), 0);
// Extract upper 8-bits of AX. C0/C2/C3 live in bits 8/10/14 of FPSW, i.e.
// bits 0/2/6 of the high byte.
SDValue Extract =
DAG.getTargetExtractSubreg(X86::sub_8bit_hi, DL, MVT::i8, FNSTSW);
// Mask all bits but C3, C2, C0. (0x45 = bits 6, 2, 0.)
Extract = DAG.getNode(ISD::AND, DL, MVT::i8, Extract,
DAG.getConstant(0x45, DL, MVT::i8));
// Masked status == 1 <=> C0 set with C2/C3 clear <=> operand is a NaN.
return DAG.getSetCC(DL, ResultVT, Extract, DAG.getConstant(1, DL, MVT::i8),
ISD::CondCode::SETEQ);
}
/// Helper for creating a X86ISD::SETCC node.
static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
SelectionDAG &DAG) {
@ -30535,7 +30495,6 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
case ISD::ISNAN: return lowerISNAN(Op, DAG);
case ISD::LRINT:
case ISD::LLRINT: return LowerLRINT_LLRINT(Op, DAG);
case ISD::SETCC:

View File

@ -1468,18 +1468,6 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
break;
}
case Intrinsic::isnan: {
Value *Arg = II->getArgOperand(0);
if (const auto *Inst = dyn_cast<Instruction>(Arg)) {
// If argument of this intrinsic call is an instruction that has 'nnan'
// flag, we can assume that NaN cannot be produced, otherwise it is
// undefined behavior.
if (Inst->getFastMathFlags().noNaNs())
return replaceInstUsesWith(
*II, ConstantInt::get(II->getType(), APInt::getNullValue(1)));
}
break;
}
case Intrinsic::copysign: {
Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1);
if (SignBitMustBeZero(Sign, &TLI)) {

View File

@ -1,490 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+bf16 | FileCheck %s -check-prefix=CHECK
define i1 @isnan_half(half %x) nounwind {
; CHECK-LABEL: isnan_half:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
; CHECK-NEXT: fmov w8, s0
; CHECK-NEXT: and w8, w8, #0x7fff
; CHECK-NEXT: mov w9, #31744
; CHECK-NEXT: cmp w8, w9
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
entry:
%0 = tail call i1 @llvm.isnan.f16(half %x)
ret i1 %0
}
define i1 @isnan_float(float %x) nounwind {
; CHECK-LABEL: isnan_float:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: fcmp s0, s0
; CHECK-NEXT: cset w0, vs
; CHECK-NEXT: ret
entry:
%0 = tail call i1 @llvm.isnan.f32(float %x)
ret i1 %0
}
define i1 @isnan_double(double %x) nounwind {
; CHECK-LABEL: isnan_double:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: fcmp d0, d0
; CHECK-NEXT: cset w0, vs
; CHECK-NEXT: ret
entry:
%0 = tail call i1 @llvm.isnan.f64(double %x)
ret i1 %0
}
define i1 @isnan_ldouble(fp128 %x) nounwind {
; CHECK-LABEL: isnan_ldouble:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: bl __unordtf2
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: cset w0, ne
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
%0 = tail call i1 @llvm.isnan.f128(fp128 %x)
ret i1 %0
}
define i1 @isnan_half_strictfp(half %x) strictfp nounwind {
; CHECK-LABEL: isnan_half_strictfp:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
; CHECK-NEXT: fmov w8, s0
; CHECK-NEXT: and w8, w8, #0x7fff
; CHECK-NEXT: mov w9, #31744
; CHECK-NEXT: cmp w8, w9
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
entry:
%0 = tail call i1 @llvm.isnan.f16(half %x)
ret i1 %0
}
define i1 @isnan_bfloat_strictfp(bfloat %x) strictfp nounwind {
; CHECK-LABEL: isnan_bfloat_strictfp:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
; CHECK-NEXT: fmov w8, s0
; CHECK-NEXT: and w8, w8, #0x7fff
; CHECK-NEXT: mov w9, #32640
; CHECK-NEXT: cmp w8, w9
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
entry:
%0 = tail call i1 @llvm.isnan.bf16(bfloat %x)
ret i1 %0
}
define i1 @isnan_float_strictfp(float %x) strictfp nounwind {
; CHECK-LABEL: isnan_float_strictfp:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: fmov w8, s0
; CHECK-NEXT: and w8, w8, #0x7fffffff
; CHECK-NEXT: mov w9, #2139095040
; CHECK-NEXT: cmp w8, w9
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
entry:
%0 = tail call i1 @llvm.isnan.f32(float %x)
ret i1 %0
}
define i1 @isnan_double_strictfp(double %x) strictfp nounwind {
; CHECK-LABEL: isnan_double_strictfp:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: fmov x8, d0
; CHECK-NEXT: and x8, x8, #0x7fffffffffffffff
; CHECK-NEXT: mov x9, #9218868437227405312
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
entry:
%0 = tail call i1 @llvm.isnan.f64(double %x)
ret i1 %0
}
define i1 @isnan_ldouble_strictfp(fp128 %x) strictfp nounwind {
; CHECK-LABEL: isnan_ldouble_strictfp:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str q0, [sp, #-16]!
; CHECK-NEXT: ldp x8, x9, [sp], #16
; CHECK-NEXT: mov x10, #9223090561878065152
; CHECK-NEXT: cmp x8, #0
; CHECK-NEXT: and x8, x9, #0x7fffffffffffffff
; CHECK-NEXT: cset w9, ne
; CHECK-NEXT: cmp x8, x10
; CHECK-NEXT: cset w8, gt
; CHECK-NEXT: csel w0, w9, w8, eq
; CHECK-NEXT: ret
entry:
%0 = tail call i1 @llvm.isnan.f128(fp128 %x)
ret i1 %0
}
define <1 x i1> @isnan_half_vec1(<1 x half> %x) nounwind {
; CHECK-LABEL: isnan_half_vec1:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $h0 killed $h0 def $q0
; CHECK-NEXT: umov w8, v0.h[0]
; CHECK-NEXT: and w8, w8, #0x7fff
; CHECK-NEXT: mov w9, #31744
; CHECK-NEXT: cmp w8, w9
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
entry:
%0 = tail call <1 x i1> @llvm.isnan.v1f16(<1 x half> %x)
ret <1 x i1> %0
}
define <1 x i1> @isnan_float_vec1(<1 x float> %x) nounwind {
; CHECK-LABEL: isnan_float_vec1:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: fcmp s0, s0
; CHECK-NEXT: cset w0, vs
; CHECK-NEXT: ret
entry:
%0 = tail call <1 x i1> @llvm.isnan.v1f32(<1 x float> %x)
ret <1 x i1> %0
}
define <1 x i1> @isnan_double_vec1(<1 x double> %x) nounwind {
; CHECK-LABEL: isnan_double_vec1:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: fcmp d0, d0
; CHECK-NEXT: cset w0, vs
; CHECK-NEXT: ret
entry:
%0 = tail call <1 x i1> @llvm.isnan.v1f64(<1 x double> %x)
ret <1 x i1> %0
}
define <1 x i1> @isnan_ldouble_vec1(<1 x fp128> %x) nounwind {
; CHECK-LABEL: isnan_ldouble_vec1:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: bl __unordtf2
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: cset w0, ne
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
%0 = tail call <1 x i1> @llvm.isnan.v1f128(<1 x fp128> %x)
ret <1 x i1> %0
}
define <2 x i1> @isnan_half_vec2(<2 x half> %x) nounwind {
; CHECK-LABEL: isnan_half_vec2:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: umov w8, v0.h[0]
; CHECK-NEXT: umov w9, v0.h[1]
; CHECK-NEXT: fmov s1, w8
; CHECK-NEXT: movi v0.2s, #127, msl #8
; CHECK-NEXT: mov v1.s[1], w9
; CHECK-NEXT: and v0.8b, v1.8b, v0.8b
; CHECK-NEXT: movi v1.2s, #124, lsl #8
; CHECK-NEXT: cmgt v0.2s, v0.2s, v1.2s
; CHECK-NEXT: ret
entry:
%0 = tail call <2 x i1> @llvm.isnan.v2f16(<2 x half> %x)
ret <2 x i1> %0
}
define <2 x i1> @isnan_float_vec2(<2 x float> %x) nounwind {
; CHECK-LABEL: isnan_float_vec2:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: fcmge v1.2s, v0.2s, #0.0
; CHECK-NEXT: fcmlt v0.2s, v0.2s, #0.0
; CHECK-NEXT: orr v0.8b, v0.8b, v1.8b
; CHECK-NEXT: mvn v0.8b, v0.8b
; CHECK-NEXT: ret
entry:
%0 = tail call <2 x i1> @llvm.isnan.v2f32(<2 x float> %x)
ret <2 x i1> %0
}
define <2 x i1> @isnan_double_vec2(<2 x double> %x) nounwind {
; CHECK-LABEL: isnan_double_vec2:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: fcmge v1.2d, v0.2d, #0.0
; CHECK-NEXT: fcmlt v0.2d, v0.2d, #0.0
; CHECK-NEXT: orr v0.16b, v0.16b, v1.16b
; CHECK-NEXT: mvn v0.16b, v0.16b
; CHECK-NEXT: xtn v0.2s, v0.2d
; CHECK-NEXT: ret
entry:
%0 = tail call <2 x i1> @llvm.isnan.v2f64(<2 x double> %x)
ret <2 x i1> %0
}
define <2 x i1> @isnan_ldouble_vec2(<2 x fp128> %x) nounwind {
; CHECK-LABEL: isnan_ldouble_vec2:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
; CHECK-NEXT: mov v0.16b, v1.16b
; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
; CHECK-NEXT: bl __unordtf2
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: cset w8, ne
; CHECK-NEXT: sbfx x8, x8, #0, #1
; CHECK-NEXT: dup v0.2d, x8
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: bl __unordtf2
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-NEXT: cset w8, ne
; CHECK-NEXT: sbfx x8, x8, #0, #1
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: dup v0.2d, x8
; CHECK-NEXT: zip1 v0.4s, v0.4s, v1.4s
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: ret
entry:
%0 = tail call <2 x i1> @llvm.isnan.v2f128(<2 x fp128> %x)
ret <2 x i1> %0
}
define <2 x i1> @isnan_half_vec2_strictfp(<2 x half> %x) strictfp nounwind {
; CHECK-LABEL: isnan_half_vec2_strictfp:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: umov w8, v0.h[0]
; CHECK-NEXT: umov w9, v0.h[1]
; CHECK-NEXT: fmov s1, w8
; CHECK-NEXT: movi v0.2s, #127, msl #8
; CHECK-NEXT: mov v1.s[1], w9
; CHECK-NEXT: and v0.8b, v1.8b, v0.8b
; CHECK-NEXT: movi v1.2s, #124, lsl #8
; CHECK-NEXT: cmgt v0.2s, v0.2s, v1.2s
; CHECK-NEXT: ret
entry:
%0 = tail call <2 x i1> @llvm.isnan.v2f16(<2 x half> %x)
ret <2 x i1> %0
}
define <2 x i1> @isnan_bfloat_vec2_strictfp(<2 x bfloat> %x) strictfp nounwind {
; CHECK-LABEL: isnan_bfloat_vec2_strictfp:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: umov w8, v0.h[0]
; CHECK-NEXT: umov w9, v0.h[1]
; CHECK-NEXT: fmov s1, w8
; CHECK-NEXT: movi v0.2s, #127, msl #8
; CHECK-NEXT: mov w10, #32640
; CHECK-NEXT: mov v1.s[1], w9
; CHECK-NEXT: and v0.8b, v1.8b, v0.8b
; CHECK-NEXT: dup v1.2s, w10
; CHECK-NEXT: cmgt v0.2s, v0.2s, v1.2s
; CHECK-NEXT: ret
entry:
%0 = tail call <2 x i1> @llvm.isnan.v2bf16(<2 x bfloat> %x)
ret <2 x i1> %0
}
define <2 x i1> @isnan_float_vec2_strictfp(<2 x float> %x) strictfp nounwind {
; CHECK-LABEL: isnan_float_vec2_strictfp:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w8, #2139095040
; CHECK-NEXT: dup v1.2s, w8
; CHECK-NEXT: bic v0.2s, #128, lsl #24
; CHECK-NEXT: cmgt v0.2s, v0.2s, v1.2s
; CHECK-NEXT: ret
entry:
%0 = tail call <2 x i1> @llvm.isnan.v2f32(<2 x float> %x)
ret <2 x i1> %0
}
define <2 x i1> @isnan_double_vec2_strictfp(<2 x double> %x) strictfp nounwind {
; CHECK-LABEL: isnan_double_vec2_strictfp:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov x8, #9223372036854775807
; CHECK-NEXT: mov x9, #9218868437227405312
; CHECK-NEXT: dup v1.2d, x8
; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: dup v1.2d, x9
; CHECK-NEXT: cmgt v0.2d, v0.2d, v1.2d
; CHECK-NEXT: xtn v0.2s, v0.2d
; CHECK-NEXT: ret
entry:
%0 = tail call <2 x i1> @llvm.isnan.v2f64(<2 x double> %x)
ret <2 x i1> %0
}
define <2 x i1> @isnan_ldouble_vec2_strictfp(<2 x fp128> %x) strictfp nounwind {
; CHECK-LABEL: isnan_ldouble_vec2_strictfp:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: stp q0, q1, [sp, #-32]!
; CHECK-NEXT: ldp x11, x10, [sp, #16]
; CHECK-NEXT: ldp x8, x9, [sp]
; CHECK-NEXT: mov x12, #9223090561878065152
; CHECK-NEXT: and x10, x10, #0x7fffffffffffffff
; CHECK-NEXT: cmp x11, #0
; CHECK-NEXT: cset w11, ne
; CHECK-NEXT: cmp x10, x12
; CHECK-NEXT: cset w10, gt
; CHECK-NEXT: and x9, x9, #0x7fffffffffffffff
; CHECK-NEXT: csel w10, w11, w10, eq
; CHECK-NEXT: cmp x8, #0
; CHECK-NEXT: sbfx x8, x10, #0, #1
; CHECK-NEXT: cset w10, ne
; CHECK-NEXT: cmp x9, x12
; CHECK-NEXT: dup v0.2d, x8
; CHECK-NEXT: cset w8, gt
; CHECK-NEXT: csel w8, w10, w8, eq
; CHECK-NEXT: sbfx x8, x8, #0, #1
; CHECK-NEXT: dup v1.2d, x8
; CHECK-NEXT: zip1 v0.4s, v1.4s, v0.4s
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
entry:
%0 = tail call <2 x i1> @llvm.isnan.v2f128(<2 x fp128> %x)
ret <2 x i1> %0
}
define <4 x i1> @isnan_half_vec4(<4 x half> %x) nounwind {
; CHECK-LABEL: isnan_half_vec4:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v1.4h, #124, lsl #8
; CHECK-NEXT: bic v0.4h, #128, lsl #8
; CHECK-NEXT: cmgt v0.4h, v0.4h, v1.4h
; CHECK-NEXT: ret
entry:
%0 = tail call <4 x i1> @llvm.isnan.v4f16(<4 x half> %x)
ret <4 x i1> %0
}
define <4 x i1> @isnan_float_vec4(<4 x float> %x) nounwind {
; CHECK-LABEL: isnan_float_vec4:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: fcmge v1.4s, v0.4s, #0.0
; CHECK-NEXT: fcmlt v0.4s, v0.4s, #0.0
; CHECK-NEXT: orr v0.16b, v0.16b, v1.16b
; CHECK-NEXT: mvn v0.16b, v0.16b
; CHECK-NEXT: xtn v0.4h, v0.4s
; CHECK-NEXT: ret
entry:
%0 = tail call <4 x i1> @llvm.isnan.v4f32(<4 x float> %x)
ret <4 x i1> %0
}
define <4 x i1> @isnan_double_vec4(<4 x double> %x) nounwind {
; CHECK-LABEL: isnan_double_vec4:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: fcmge v2.2d, v0.2d, #0.0
; CHECK-NEXT: fcmlt v0.2d, v0.2d, #0.0
; CHECK-NEXT: fcmge v3.2d, v1.2d, #0.0
; CHECK-NEXT: fcmlt v1.2d, v1.2d, #0.0
; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b
; CHECK-NEXT: orr v1.16b, v1.16b, v3.16b
; CHECK-NEXT: mvn v0.16b, v0.16b
; CHECK-NEXT: xtn v0.2s, v0.2d
; CHECK-NEXT: mvn v1.16b, v1.16b
; CHECK-NEXT: xtn2 v0.4s, v1.2d
; CHECK-NEXT: xtn v0.4h, v0.4s
; CHECK-NEXT: ret
entry:
%0 = tail call <4 x i1> @llvm.isnan.v4f64(<4 x double> %x)
ret <4 x i1> %0
}
define <4 x i1> @isnan_half_vec4_strictfp(<4 x half> %x) strictfp nounwind {
; CHECK-LABEL: isnan_half_vec4_strictfp:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v1.4h, #124, lsl #8
; CHECK-NEXT: bic v0.4h, #128, lsl #8
; CHECK-NEXT: cmgt v0.4h, v0.4h, v1.4h
; CHECK-NEXT: ret
entry:
%0 = tail call <4 x i1> @llvm.isnan.v4f16(<4 x half> %x)
ret <4 x i1> %0
}
define <4 x i1> @isnan_bfloat_vec4_strictfp(<4 x bfloat> %x) strictfp nounwind {
; CHECK-LABEL: isnan_bfloat_vec4_strictfp:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w8, #32640
; CHECK-NEXT: dup v1.4h, w8
; CHECK-NEXT: bic v0.4h, #128, lsl #8
; CHECK-NEXT: cmgt v0.4h, v0.4h, v1.4h
; CHECK-NEXT: ret
entry:
%0 = tail call <4 x i1> @llvm.isnan.v4bf16(<4 x bfloat> %x)
ret <4 x i1> %0
}
define <4 x i1> @isnan_float_vec4_strictfp(<4 x float> %x) strictfp nounwind {
; CHECK-LABEL: isnan_float_vec4_strictfp:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w8, #2139095040
; CHECK-NEXT: dup v1.4s, w8
; CHECK-NEXT: bic v0.4s, #128, lsl #24
; CHECK-NEXT: cmgt v0.4s, v0.4s, v1.4s
; CHECK-NEXT: xtn v0.4h, v0.4s
; CHECK-NEXT: ret
entry:
%0 = tail call <4 x i1> @llvm.isnan.v4f32(<4 x float> %x)
ret <4 x i1> %0
}
define <4 x i1> @isnan_double_vec4_strictfp(<4 x double> %x) strictfp nounwind {
; CHECK-LABEL: isnan_double_vec4_strictfp:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov x8, #9223372036854775807
; CHECK-NEXT: mov x9, #9218868437227405312
; CHECK-NEXT: dup v2.2d, x8
; CHECK-NEXT: dup v3.2d, x9
; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
; CHECK-NEXT: and v1.16b, v1.16b, v2.16b
; CHECK-NEXT: cmgt v0.2d, v0.2d, v3.2d
; CHECK-NEXT: cmgt v1.2d, v1.2d, v3.2d
; CHECK-NEXT: xtn v0.2s, v0.2d
; CHECK-NEXT: xtn2 v0.4s, v1.2d
; CHECK-NEXT: xtn v0.4h, v0.4s
; CHECK-NEXT: ret
entry:
%0 = tail call <4 x i1> @llvm.isnan.v4f64(<4 x double> %x)
ret <4 x i1> %0
}
declare i1 @llvm.isnan.f16(half)
declare i1 @llvm.isnan.bf16(bfloat)
declare i1 @llvm.isnan.f32(float)
declare i1 @llvm.isnan.f64(double)
declare i1 @llvm.isnan.f128(fp128)
declare <1 x i1> @llvm.isnan.v1f16(<1 x half>)
declare <1 x i1> @llvm.isnan.v1bf16(<1 x bfloat>)
declare <1 x i1> @llvm.isnan.v1f32(<1 x float>)
declare <1 x i1> @llvm.isnan.v1f64(<1 x double>)
declare <1 x i1> @llvm.isnan.v1f128(<1 x fp128>)
declare <2 x i1> @llvm.isnan.v2f16(<2 x half>)
declare <2 x i1> @llvm.isnan.v2bf16(<2 x bfloat>)
declare <2 x i1> @llvm.isnan.v2f32(<2 x float>)
declare <2 x i1> @llvm.isnan.v2f64(<2 x double>)
declare <2 x i1> @llvm.isnan.v2f128(<2 x fp128>)
declare <4 x i1> @llvm.isnan.v4f16(<4 x half>)
declare <4 x i1> @llvm.isnan.v4bf16(<4 x bfloat>)
declare <4 x i1> @llvm.isnan.v4f32(<4 x float>)
declare <4 x i1> @llvm.isnan.v4f64(<4 x double>)
declare <4 x i1> @llvm.isnan.v4f128(<4 x fp128>)

View File

@ -1,535 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=powerpcle-unknown-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
; Scalar llvm.isnan codegen on 32-bit powerpcle, default (non-strict) FP
; environment: the lowering is free to use an unordered FP self-compare
; (fcmpu of a register with itself), selecting 1 when the unordered bit is
; set. NOTE(review): CHECK lines are autogenerated by
; update_llc_test_checks.py; regenerate rather than hand-edit.
define i1 @isnan_float(float %x) nounwind {
; CHECK-LABEL: isnan_float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: fcmpu 0, 1, 1
; CHECK-NEXT: li 4, 1
; CHECK-NEXT: bc 12, 3, .LBB0_1
; CHECK-NEXT: blr
; CHECK-NEXT: .LBB0_1: # %entry
; CHECK-NEXT: addi 3, 4, 0
; CHECK-NEXT: blr
entry:
  %0 = tail call i1 @llvm.isnan.f32(float %x)
  ret i1 %0
}
; f64 takes the same unordered-self-compare path as f32.
define i1 @isnan_double(double %x) nounwind {
; CHECK-LABEL: isnan_double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: fcmpu 0, 1, 1
; CHECK-NEXT: li 4, 1
; CHECK-NEXT: bc 12, 3, .LBB1_1
; CHECK-NEXT: blr
; CHECK-NEXT: .LBB1_1: # %entry
; CHECK-NEXT: addi 3, 4, 0
; CHECK-NEXT: blr
entry:
  %0 = tail call i1 @llvm.isnan.f64(double %x)
  ret i1 %0
}
; ppc_fp128 (IBM double-double) has no single hardware compare; the
; expansion stores both halves and classifies them with integer word
; compares against the f64 exponent pattern (lis 3, 32752 == 0x7FF0 in the
; high halfword), combining results in CR bits.
; NOTE(review): the precise CR-bit dance below is autogenerated; trust the
; update script over manual reading.
define i1 @isnan_ldouble(ppc_fp128 %x) nounwind {
; CHECK-LABEL: isnan_ldouble:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwu 1, -32(1)
; CHECK-NEXT: stfd 1, 16(1)
; CHECK-NEXT: lis 3, 32752
; CHECK-NEXT: lwz 4, 20(1)
; CHECK-NEXT: stfd 2, 24(1)
; CHECK-NEXT: lwz 5, 28(1)
; CHECK-NEXT: cmplw 1, 4, 3
; CHECK-NEXT: lwz 3, 24(1)
; CHECK-NEXT: xoris 4, 4, 32752
; CHECK-NEXT: lwz 6, 16(1)
; CHECK-NEXT: clrlwi. 5, 5, 1
; CHECK-NEXT: cmplwi 5, 5, 0
; CHECK-NEXT: crandc 24, 1, 22
; CHECK-NEXT: cmpwi 3, 0
; CHECK-NEXT: crandc 20, 22, 2
; CHECK-NEXT: cmpwi 6, 0
; CHECK-NEXT: cmplwi 7, 4, 0
; CHECK-NEXT: or 3, 3, 5
; CHECK-NEXT: crandc 21, 5, 30
; CHECK-NEXT: crandc 22, 30, 2
; CHECK-NEXT: cmplwi 3, 0
; CHECK-NEXT: cror 20, 20, 24
; CHECK-NEXT: cror 21, 22, 21
; CHECK-NEXT: crandc 20, 20, 2
; CHECK-NEXT: crand 21, 2, 21
; CHECK-NEXT: crnor 20, 21, 20
; CHECK-NEXT: li 3, 1
; CHECK-NEXT: bc 12, 20, .LBB2_1
; CHECK-NEXT: b .LBB2_2
; CHECK-NEXT: .LBB2_1: # %entry
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: .LBB2_2: # %entry
; CHECK-NEXT: addi 1, 1, 32
; CHECK-NEXT: blr
entry:
  %0 = tail call i1 @llvm.isnan.ppcf128(ppc_fp128 %x)
  ret i1 %0
}
; strictfp scalar variants: an fcmpu on a NaN could raise an invalid-op
; exception, so here the expansion goes through memory and integer
; compares instead — store the float, reload as integer, clear the sign
; bit (clrlwi ..., 1) and compare against the infinity exponent pattern
; (lis 3, 32640 == 0x7F80 for f32). NOTE(review): autogenerated CHECKs.
define i1 @isnan_float_strictfp(float %x) strictfp nounwind {
; CHECK-LABEL: isnan_float_strictfp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwu 1, -16(1)
; CHECK-NEXT: stfs 1, 12(1)
; CHECK-NEXT: lis 3, 32640
; CHECK-NEXT: lwz 4, 12(1)
; CHECK-NEXT: clrlwi 4, 4, 1
; CHECK-NEXT: cmpw 4, 3
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: li 4, 1
; CHECK-NEXT: bc 12, 1, .LBB3_1
; CHECK-NEXT: b .LBB3_2
; CHECK-NEXT: .LBB3_1: # %entry
; CHECK-NEXT: addi 3, 4, 0
; CHECK-NEXT: .LBB3_2: # %entry
; CHECK-NEXT: addi 1, 1, 16
; CHECK-NEXT: blr
entry:
  %0 = tail call i1 @llvm.isnan.f32(float %x)
  ret i1 %0
}
; f64 strictfp: same integer classification, but the 64-bit payload is two
; 32-bit words (exponent pattern 0x7FF0 via lis 3, 32752), so low-word
; checks are folded in through CR logic.
define i1 @isnan_double_strictfp(double %x) strictfp nounwind {
; CHECK-LABEL: isnan_double_strictfp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwu 1, -16(1)
; CHECK-NEXT: stfd 1, 8(1)
; CHECK-NEXT: lis 3, 32752
; CHECK-NEXT: lwz 4, 12(1)
; CHECK-NEXT: lwz 5, 8(1)
; CHECK-NEXT: clrlwi 4, 4, 1
; CHECK-NEXT: cmpw 4, 3
; CHECK-NEXT: xoris 3, 4, 32752
; CHECK-NEXT: cmplwi 1, 3, 0
; CHECK-NEXT: crandc 20, 1, 6
; CHECK-NEXT: cmpwi 5, 0
; CHECK-NEXT: crandc 21, 6, 2
; CHECK-NEXT: crnor 20, 21, 20
; CHECK-NEXT: li 3, 1
; CHECK-NEXT: bc 12, 20, .LBB4_1
; CHECK-NEXT: b .LBB4_2
; CHECK-NEXT: .LBB4_1: # %entry
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: .LBB4_2: # %entry
; CHECK-NEXT: addi 1, 1, 16
; CHECK-NEXT: blr
entry:
  %0 = tail call i1 @llvm.isnan.f64(double %x)
  ret i1 %0
}
; ppc_fp128 strictfp: identical code to the non-strict isnan_ldouble above,
; since that expansion is already integer-only and exception-free.
define i1 @isnan_ldouble_strictfp(ppc_fp128 %x) strictfp nounwind {
; CHECK-LABEL: isnan_ldouble_strictfp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwu 1, -32(1)
; CHECK-NEXT: stfd 1, 16(1)
; CHECK-NEXT: lis 3, 32752
; CHECK-NEXT: lwz 4, 20(1)
; CHECK-NEXT: stfd 2, 24(1)
; CHECK-NEXT: lwz 5, 28(1)
; CHECK-NEXT: cmplw 1, 4, 3
; CHECK-NEXT: lwz 3, 24(1)
; CHECK-NEXT: xoris 4, 4, 32752
; CHECK-NEXT: lwz 6, 16(1)
; CHECK-NEXT: clrlwi. 5, 5, 1
; CHECK-NEXT: cmplwi 5, 5, 0
; CHECK-NEXT: crandc 24, 1, 22
; CHECK-NEXT: cmpwi 3, 0
; CHECK-NEXT: crandc 20, 22, 2
; CHECK-NEXT: cmpwi 6, 0
; CHECK-NEXT: cmplwi 7, 4, 0
; CHECK-NEXT: or 3, 3, 5
; CHECK-NEXT: crandc 21, 5, 30
; CHECK-NEXT: crandc 22, 30, 2
; CHECK-NEXT: cmplwi 3, 0
; CHECK-NEXT: cror 20, 20, 24
; CHECK-NEXT: cror 21, 22, 21
; CHECK-NEXT: crandc 20, 20, 2
; CHECK-NEXT: crand 21, 2, 21
; CHECK-NEXT: crnor 20, 21, 20
; CHECK-NEXT: li 3, 1
; CHECK-NEXT: bc 12, 20, .LBB5_1
; CHECK-NEXT: b .LBB5_2
; CHECK-NEXT: .LBB5_1: # %entry
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: .LBB5_2: # %entry
; CHECK-NEXT: addi 1, 1, 32
; CHECK-NEXT: blr
entry:
  %0 = tail call i1 @llvm.isnan.ppcf128(ppc_fp128 %x)
  ret i1 %0
}
; <1 x T> vectors: single-element vectors are scalarized, so each vec1 test
; is expected to produce exactly the same code as its scalar counterpart
; above. NOTE(review): autogenerated CHECKs — regenerate, don't hand-edit.
define <1 x i1> @isnan_float_vec1(<1 x float> %x) nounwind {
; CHECK-LABEL: isnan_float_vec1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: fcmpu 0, 1, 1
; CHECK-NEXT: li 4, 1
; CHECK-NEXT: bc 12, 3, .LBB6_1
; CHECK-NEXT: blr
; CHECK-NEXT: .LBB6_1: # %entry
; CHECK-NEXT: addi 3, 4, 0
; CHECK-NEXT: blr
entry:
  %0 = tail call <1 x i1> @llvm.isnan.v1f32(<1 x float> %x)
  ret <1 x i1> %0
}
; Scalarized to the isnan_double pattern.
define <1 x i1> @isnan_double_vec1(<1 x double> %x) nounwind {
; CHECK-LABEL: isnan_double_vec1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: fcmpu 0, 1, 1
; CHECK-NEXT: li 4, 1
; CHECK-NEXT: bc 12, 3, .LBB7_1
; CHECK-NEXT: blr
; CHECK-NEXT: .LBB7_1: # %entry
; CHECK-NEXT: addi 3, 4, 0
; CHECK-NEXT: blr
entry:
  %0 = tail call <1 x i1> @llvm.isnan.v1f64(<1 x double> %x)
  ret <1 x i1> %0
}
; Scalarized to the isnan_ldouble (double-double integer classification)
; pattern.
define <1 x i1> @isnan_ldouble_vec1(<1 x ppc_fp128> %x) nounwind {
; CHECK-LABEL: isnan_ldouble_vec1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwu 1, -32(1)
; CHECK-NEXT: stfd 1, 16(1)
; CHECK-NEXT: lis 3, 32752
; CHECK-NEXT: lwz 4, 20(1)
; CHECK-NEXT: stfd 2, 24(1)
; CHECK-NEXT: lwz 5, 28(1)
; CHECK-NEXT: cmplw 1, 4, 3
; CHECK-NEXT: lwz 3, 24(1)
; CHECK-NEXT: xoris 4, 4, 32752
; CHECK-NEXT: lwz 6, 16(1)
; CHECK-NEXT: clrlwi. 5, 5, 1
; CHECK-NEXT: cmplwi 5, 5, 0
; CHECK-NEXT: crandc 24, 1, 22
; CHECK-NEXT: cmpwi 3, 0
; CHECK-NEXT: crandc 20, 22, 2
; CHECK-NEXT: cmpwi 6, 0
; CHECK-NEXT: cmplwi 7, 4, 0
; CHECK-NEXT: or 3, 3, 5
; CHECK-NEXT: crandc 21, 5, 30
; CHECK-NEXT: crandc 22, 30, 2
; CHECK-NEXT: cmplwi 3, 0
; CHECK-NEXT: cror 20, 20, 24
; CHECK-NEXT: cror 21, 22, 21
; CHECK-NEXT: crandc 20, 20, 2
; CHECK-NEXT: crand 21, 2, 21
; CHECK-NEXT: crnor 20, 21, 20
; CHECK-NEXT: li 3, 1
; CHECK-NEXT: bc 12, 20, .LBB8_1
; CHECK-NEXT: b .LBB8_2
; CHECK-NEXT: .LBB8_1: # %entry
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: .LBB8_2: # %entry
; CHECK-NEXT: addi 1, 1, 32
; CHECK-NEXT: blr
entry:
  %0 = tail call <1 x i1> @llvm.isnan.v1ppcf128(<1 x ppc_fp128> %x)
  ret <1 x i1> %0
}
; <2 x T> vectors: no vector FP unit is assumed here, so the lowering
; scalarizes into one unordered self-compare (fcmpu) per lane and merges
; the two i1 results into GPRs 3/4. NOTE(review): autogenerated CHECKs.
define <2 x i1> @isnan_float_vec2(<2 x float> %x) nounwind {
; CHECK-LABEL: isnan_float_vec2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 0
; CHECK-NEXT: fcmpu 0, 2, 2
; CHECK-NEXT: fcmpu 1, 1, 1
; CHECK-NEXT: li 5, 1
; CHECK-NEXT: bc 12, 7, .LBB9_2
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: ori 3, 4, 0
; CHECK-NEXT: b .LBB9_3
; CHECK-NEXT: .LBB9_2: # %entry
; CHECK-NEXT: addi 3, 5, 0
; CHECK-NEXT: .LBB9_3: # %entry
; CHECK-NEXT: bc 12, 3, .LBB9_4
; CHECK-NEXT: blr
; CHECK-NEXT: .LBB9_4: # %entry
; CHECK-NEXT: addi 4, 5, 0
; CHECK-NEXT: blr
entry:
  %0 = tail call <2 x i1> @llvm.isnan.v2f32(<2 x float> %x)
  ret <2 x i1> %0
}
; Same per-lane scalarization for <2 x double>.
define <2 x i1> @isnan_double_vec2(<2 x double> %x) nounwind {
; CHECK-LABEL: isnan_double_vec2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 0
; CHECK-NEXT: fcmpu 0, 2, 2
; CHECK-NEXT: fcmpu 1, 1, 1
; CHECK-NEXT: li 5, 1
; CHECK-NEXT: bc 12, 7, .LBB10_2
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: ori 3, 4, 0
; CHECK-NEXT: b .LBB10_3
; CHECK-NEXT: .LBB10_2: # %entry
; CHECK-NEXT: addi 3, 5, 0
; CHECK-NEXT: .LBB10_3: # %entry
; CHECK-NEXT: bc 12, 3, .LBB10_4
; CHECK-NEXT: blr
; CHECK-NEXT: .LBB10_4: # %entry
; CHECK-NEXT: addi 4, 5, 0
; CHECK-NEXT: blr
entry:
  %0 = tail call <2 x i1> @llvm.isnan.v2f64(<2 x double> %x)
  ret <2 x i1> %0
}
; <2 x ppc_fp128>: two independent copies of the double-double integer
; classification, interleaved by the scheduler; results land in GPRs 3/4.
; NOTE(review): autogenerated — do not try to hand-maintain the CR-bit
; sequence.
define <2 x i1> @isnan_ldouble_vec2(<2 x ppc_fp128> %x) nounwind {
; CHECK-LABEL: isnan_ldouble_vec2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwu 1, -48(1)
; CHECK-NEXT: stfd 3, 32(1)
; CHECK-NEXT: lis 3, 32752
; CHECK-NEXT: lwz 8, 32(1)
; CHECK-NEXT: stfd 4, 40(1)
; CHECK-NEXT: lwz 9, 44(1)
; CHECK-NEXT: cmpwi 1, 8, 0
; CHECK-NEXT: lwz 10, 36(1)
; CHECK-NEXT: lwz 8, 40(1)
; CHECK-NEXT: clrlwi. 9, 9, 1
; CHECK-NEXT: stfd 1, 16(1)
; CHECK-NEXT: cmplwi 5, 9, 0
; CHECK-NEXT: lwz 5, 20(1)
; CHECK-NEXT: crandc 24, 1, 22
; CHECK-NEXT: stfd 2, 24(1)
; CHECK-NEXT: cmpwi 8, 0
; CHECK-NEXT: lwz 4, 16(1)
; CHECK-NEXT: cmplw 7, 10, 3
; CHECK-NEXT: lwz 7, 28(1)
; CHECK-NEXT: xoris 10, 10, 32752
; CHECK-NEXT: crandc 20, 22, 2
; CHECK-NEXT: cmplwi 10, 0
; CHECK-NEXT: lwz 6, 24(1)
; CHECK-NEXT: crandc 21, 29, 2
; CHECK-NEXT: cmplw 7, 5, 3
; CHECK-NEXT: xoris 3, 5, 32752
; CHECK-NEXT: crandc 22, 2, 6
; CHECK-NEXT: cmplwi 3, 0
; CHECK-NEXT: cmpwi 1, 4, 0
; CHECK-NEXT: crandc 23, 29, 2
; CHECK-NEXT: crandc 25, 2, 6
; CHECK-NEXT: clrlwi. 3, 7, 1
; CHECK-NEXT: cmplwi 1, 3, 0
; CHECK-NEXT: crandc 26, 1, 6
; CHECK-NEXT: cmpwi 6, 0
; CHECK-NEXT: or 4, 8, 9
; CHECK-NEXT: crandc 27, 6, 2
; CHECK-NEXT: cmplwi 4, 0
; CHECK-NEXT: or 3, 6, 3
; CHECK-NEXT: cror 20, 20, 24
; CHECK-NEXT: cror 21, 22, 21
; CHECK-NEXT: cmplwi 1, 3, 0
; CHECK-NEXT: cror 22, 25, 23
; CHECK-NEXT: crandc 20, 20, 2
; CHECK-NEXT: crand 21, 2, 21
; CHECK-NEXT: cror 23, 27, 26
; CHECK-NEXT: crand 22, 6, 22
; CHECK-NEXT: crnor 20, 21, 20
; CHECK-NEXT: crandc 21, 23, 6
; CHECK-NEXT: crnor 21, 22, 21
; CHECK-NEXT: li 4, 1
; CHECK-NEXT: bc 12, 21, .LBB11_2
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: ori 3, 4, 0
; CHECK-NEXT: b .LBB11_3
; CHECK-NEXT: .LBB11_2: # %entry
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: .LBB11_3: # %entry
; CHECK-NEXT: bc 12, 20, .LBB11_4
; CHECK-NEXT: b .LBB11_5
; CHECK-NEXT: .LBB11_4: # %entry
; CHECK-NEXT: li 4, 0
; CHECK-NEXT: .LBB11_5: # %entry
; CHECK-NEXT: addi 1, 1, 48
; CHECK-NEXT: blr
entry:
  %0 = tail call <2 x i1> @llvm.isnan.v2ppcf128(<2 x ppc_fp128> %x)
  ret <2 x i1> %0
}
; strictfp <2 x T>: scalarized like the non-strict vec2 tests, but each
; lane uses the exception-free store/reload + integer-compare expansion
; (sign bit cleared with clrlwi, compared against the infinity exponent
; pattern) instead of fcmpu. NOTE(review): autogenerated CHECKs.
define <2 x i1> @isnan_float_vec2_strictfp(<2 x float> %x) strictfp nounwind {
; CHECK-LABEL: isnan_float_vec2_strictfp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwu 1, -16(1)
; CHECK-NEXT: stfs 1, 8(1)
; CHECK-NEXT: lis 3, 32640
; CHECK-NEXT: stfs 2, 12(1)
; CHECK-NEXT: lwz 4, 12(1)
; CHECK-NEXT: lwz 5, 8(1)
; CHECK-NEXT: clrlwi 4, 4, 1
; CHECK-NEXT: cmpw 4, 3
; CHECK-NEXT: clrlwi 5, 5, 1
; CHECK-NEXT: li 4, 0
; CHECK-NEXT: cmpw 1, 5, 3
; CHECK-NEXT: li 5, 1
; CHECK-NEXT: bc 12, 5, .LBB12_2
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: ori 3, 4, 0
; CHECK-NEXT: b .LBB12_3
; CHECK-NEXT: .LBB12_2: # %entry
; CHECK-NEXT: addi 3, 5, 0
; CHECK-NEXT: .LBB12_3: # %entry
; CHECK-NEXT: bc 12, 1, .LBB12_4
; CHECK-NEXT: b .LBB12_5
; CHECK-NEXT: .LBB12_4: # %entry
; CHECK-NEXT: addi 4, 5, 0
; CHECK-NEXT: .LBB12_5: # %entry
; CHECK-NEXT: addi 1, 1, 16
; CHECK-NEXT: blr
entry:
  %0 = tail call <2 x i1> @llvm.isnan.v2f32(<2 x float> %x)
  ret <2 x i1> %0
}
; f64 lanes: two copies of the isnan_double_strictfp word-pair
; classification (exponent pattern 0x7FF0 via lis 3, 32752).
define <2 x i1> @isnan_double_vec2_strictfp(<2 x double> %x) strictfp nounwind {
; CHECK-LABEL: isnan_double_vec2_strictfp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwu 1, -32(1)
; CHECK-NEXT: stfd 2, 24(1)
; CHECK-NEXT: lis 3, 32752
; CHECK-NEXT: lwz 5, 28(1)
; CHECK-NEXT: stfd 1, 16(1)
; CHECK-NEXT: lwz 6, 20(1)
; CHECK-NEXT: clrlwi 5, 5, 1
; CHECK-NEXT: lwz 7, 24(1)
; CHECK-NEXT: cmpw 5, 3
; CHECK-NEXT: xoris 5, 5, 32752
; CHECK-NEXT: lwz 4, 16(1)
; CHECK-NEXT: cmplwi 1, 5, 0
; CHECK-NEXT: crandc 20, 1, 6
; CHECK-NEXT: cmpwi 7, 0
; CHECK-NEXT: clrlwi 5, 6, 1
; CHECK-NEXT: crandc 21, 6, 2
; CHECK-NEXT: cmpw 5, 3
; CHECK-NEXT: xoris 3, 5, 32752
; CHECK-NEXT: cmplwi 1, 3, 0
; CHECK-NEXT: crandc 22, 1, 6
; CHECK-NEXT: cmpwi 4, 0
; CHECK-NEXT: crandc 23, 6, 2
; CHECK-NEXT: crnor 20, 21, 20
; CHECK-NEXT: crnor 21, 23, 22
; CHECK-NEXT: li 4, 1
; CHECK-NEXT: bc 12, 21, .LBB13_2
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: ori 3, 4, 0
; CHECK-NEXT: b .LBB13_3
; CHECK-NEXT: .LBB13_2: # %entry
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: .LBB13_3: # %entry
; CHECK-NEXT: bc 12, 20, .LBB13_4
; CHECK-NEXT: b .LBB13_5
; CHECK-NEXT: .LBB13_4: # %entry
; CHECK-NEXT: li 4, 0
; CHECK-NEXT: .LBB13_5: # %entry
; CHECK-NEXT: addi 1, 1, 32
; CHECK-NEXT: blr
entry:
  %0 = tail call <2 x i1> @llvm.isnan.v2f64(<2 x double> %x)
  ret <2 x i1> %0
}
; <2 x ppc_fp128> strictfp: identical to the non-strict isnan_ldouble_vec2
; expansion, which is already integer-only and exception-free.
define <2 x i1> @isnan_ldouble_vec2_strictfp(<2 x ppc_fp128> %x) strictfp nounwind {
; CHECK-LABEL: isnan_ldouble_vec2_strictfp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stwu 1, -48(1)
; CHECK-NEXT: stfd 3, 32(1)
; CHECK-NEXT: lis 3, 32752
; CHECK-NEXT: lwz 8, 32(1)
; CHECK-NEXT: stfd 4, 40(1)
; CHECK-NEXT: lwz 9, 44(1)
; CHECK-NEXT: cmpwi 1, 8, 0
; CHECK-NEXT: lwz 10, 36(1)
; CHECK-NEXT: lwz 8, 40(1)
; CHECK-NEXT: clrlwi. 9, 9, 1
; CHECK-NEXT: stfd 1, 16(1)
; CHECK-NEXT: cmplwi 5, 9, 0
; CHECK-NEXT: lwz 5, 20(1)
; CHECK-NEXT: crandc 24, 1, 22
; CHECK-NEXT: stfd 2, 24(1)
; CHECK-NEXT: cmpwi 8, 0
; CHECK-NEXT: lwz 4, 16(1)
; CHECK-NEXT: cmplw 7, 10, 3
; CHECK-NEXT: lwz 7, 28(1)
; CHECK-NEXT: xoris 10, 10, 32752
; CHECK-NEXT: crandc 20, 22, 2
; CHECK-NEXT: cmplwi 10, 0
; CHECK-NEXT: lwz 6, 24(1)
; CHECK-NEXT: crandc 21, 29, 2
; CHECK-NEXT: cmplw 7, 5, 3
; CHECK-NEXT: xoris 3, 5, 32752
; CHECK-NEXT: crandc 22, 2, 6
; CHECK-NEXT: cmplwi 3, 0
; CHECK-NEXT: cmpwi 1, 4, 0
; CHECK-NEXT: crandc 23, 29, 2
; CHECK-NEXT: crandc 25, 2, 6
; CHECK-NEXT: clrlwi. 3, 7, 1
; CHECK-NEXT: cmplwi 1, 3, 0
; CHECK-NEXT: crandc 26, 1, 6
; CHECK-NEXT: cmpwi 6, 0
; CHECK-NEXT: or 4, 8, 9
; CHECK-NEXT: crandc 27, 6, 2
; CHECK-NEXT: cmplwi 4, 0
; CHECK-NEXT: or 3, 6, 3
; CHECK-NEXT: cror 20, 20, 24
; CHECK-NEXT: cror 21, 22, 21
; CHECK-NEXT: cmplwi 1, 3, 0
; CHECK-NEXT: cror 22, 25, 23
; CHECK-NEXT: crandc 20, 20, 2
; CHECK-NEXT: crand 21, 2, 21
; CHECK-NEXT: cror 23, 27, 26
; CHECK-NEXT: crand 22, 6, 22
; CHECK-NEXT: crnor 20, 21, 20
; CHECK-NEXT: crandc 21, 23, 6
; CHECK-NEXT: crnor 21, 22, 21
; CHECK-NEXT: li 4, 1
; CHECK-NEXT: bc 12, 21, .LBB14_2
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: ori 3, 4, 0
; CHECK-NEXT: b .LBB14_3
; CHECK-NEXT: .LBB14_2: # %entry
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: .LBB14_3: # %entry
; CHECK-NEXT: bc 12, 20, .LBB14_4
; CHECK-NEXT: b .LBB14_5
; CHECK-NEXT: .LBB14_4: # %entry
; CHECK-NEXT: li 4, 0
; CHECK-NEXT: .LBB14_5: # %entry
; CHECK-NEXT: addi 1, 1, 48
; CHECK-NEXT: blr
entry:
  %0 = tail call <2 x i1> @llvm.isnan.v2ppcf128(<2 x ppc_fp128> %x)
  ret <2 x i1> %0
}
declare i1 @llvm.isnan.f32(float)
declare i1 @llvm.isnan.f64(double)
declare i1 @llvm.isnan.ppcf128(ppc_fp128)
declare <1 x i1> @llvm.isnan.v1f32(<1 x float>)
declare <1 x i1> @llvm.isnan.v1f64(<1 x double>)
declare <1 x i1> @llvm.isnan.v1ppcf128(<1 x ppc_fp128>)
declare <2 x i1> @llvm.isnan.v2f32(<2 x float>)
declare <2 x i1> @llvm.isnan.v2f64(<2 x double>)
declare <2 x i1> @llvm.isnan.v2ppcf128(<2 x ppc_fp128>)

File diff suppressed because it is too large Load Diff

View File

@ -1,66 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -instcombine < %s | FileCheck %s
; InstCombine fast-math-flag tests for llvm.isnan: only the nnan flag on
; the operand's defining instruction may fold the call to false; with no
; flags the call must survive. NOTE(review): autogenerated CHECKs
; (update_test_checks.py).
define i1 @isnan_f32_noflags(float %x, float %y) {
; CHECK-LABEL: @isnan_f32_noflags(
; CHECK-NEXT: [[R:%.*]] = fmul float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[T:%.*]] = call i1 @llvm.isnan.f32(float [[R]])
; CHECK-NEXT: ret i1 [[T]]
;
  %r = fmul float %x, %y
  %t = call i1 @llvm.isnan.f32(float %r)
  ret i1 %t
}
; ninf alone does not exclude NaN results, so no fold.
define i1 @isnan_f32_ninf(float %x, float %y) {
; CHECK-LABEL: @isnan_f32_ninf(
; CHECK-NEXT: [[R:%.*]] = fsub ninf float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[T:%.*]] = call i1 @llvm.isnan.f32(float [[R]])
; CHECK-NEXT: ret i1 [[T]]
;
  %r = fsub ninf float %x, %y
  %t = call i1 @llvm.isnan.f32(float %r)
  ret i1 %t
}
; nsz alone is also insufficient — no fold.
define i1 @isnan_f32_nsz(float %x, float %y) {
; CHECK-LABEL: @isnan_f32_nsz(
; CHECK-NEXT: [[R:%.*]] = fdiv nsz float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[T:%.*]] = call i1 @llvm.isnan.f32(float [[R]])
; CHECK-NEXT: ret i1 [[T]]
;
  %r = fdiv nsz float %x, %y
  %t = call i1 @llvm.isnan.f32(float %r)
  ret i1 %t
}
; nnan on the operand lets isnan fold to the constant false.
define i1 @isnan_f32(float %x, float %y) {
; CHECK-LABEL: @isnan_f32(
; CHECK-NEXT: ret i1 false
;
  %r = fadd nnan float %x, %y
  %t = call i1 @llvm.isnan.f32(float %r)
  ret i1 %t
}
; Same nnan fold for a single-element vector (result is zeroinitializer).
define <1 x i1> @isnan_v1f32(<1 x float> %x, <1 x float> %y) {
; CHECK-LABEL: @isnan_v1f32(
; CHECK-NEXT: ret <1 x i1> zeroinitializer
;
  %r = fadd nnan <1 x float> %x, %y
  %t = call <1 x i1> @llvm.isnan.v1f32(<1 x float> %r)
  ret <1 x i1> %t
}
; Same nnan fold for a two-element vector.
define <2 x i1> @isnan_v2f32(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @isnan_v2f32(
; CHECK-NEXT: ret <2 x i1> zeroinitializer
;
  %r = fadd nnan <2 x float> %x, %y
  %t = call <2 x i1> @llvm.isnan.v2f32(<2 x float> %r)
  ret <2 x i1> %t
}
declare i1 @llvm.isnan.f32(float %r)
declare <1 x i1> @llvm.isnan.v1f32(<1 x float> %r)
declare <2 x i1> @llvm.isnan.v2f32(<2 x float> %r)

View File

@ -1,35 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instsimplify -S | FileCheck %s
; InstSimplify constant folding of llvm.isnan: a quiet-NaN argument
; (0x7FF8000000000000 is the f32 NaN spelled in IR's double-based hex
; notation) must fold to true. NOTE(review): autogenerated CHECKs.
define i1 @isnan_01() {
; CHECK-LABEL: @isnan_01(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i1 true
;
entry:
  %0 = tail call i1 @llvm.isnan.f32(float 0x7FF8000000000000)
  ret i1 %0
}
; +infinity (0x7FF0000000000000) is not NaN — folds to false.
define i1 @isnan_02() {
; CHECK-LABEL: @isnan_02(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i1 false
;
entry:
  %0 = tail call i1 @llvm.isnan.f32(float 0x7FF0000000000000)
  ret i1 %0
}
; Vector constants fold per element: NaN/Inf/1.0/-NaN -> true/false/false/true.
define <4 x i1> @isnan_03() {
; CHECK-LABEL: @isnan_03(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret <4 x i1> <i1 true, i1 false, i1 false, i1 true>
;
entry:
  %0 = tail call <4 x i1> @llvm.isnan.v4f32(<4 x float><float 0x7FF8000000000000, float 0x7FF0000000000000, float 1.0, float 0xFFF8000000000000>)
  ret <4 x i1> %0
}
declare i1 @llvm.isnan.f32(float)
declare <4 x i1> @llvm.isnan.v4f32(<4 x float>)