[AArch64] Add NEON scalar floating-point compare LLVM AArch64 intrinsics that
use f32/f64 types, rather than their vector equivalents. llvm-svn: 197068
parent 473a01e1c9
commit 088f93d4b5
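
In IR terms, the new intrinsics take the floating-point operands directly instead of requiring them to be packed into one-element vectors first; only the result stays a <1 x iN> mask. A minimal before/after sketch (value names are illustrative; the intrinsic signatures are the ones declared in the updated test below):

    ; before: scalar operands wrapped into <1 x float> just to feed the intrinsic
    %lhs = insertelement <1 x float> undef, float %a, i32 0
    %rhs = insertelement <1 x float> undef, float %b, i32 0
    %cmp.old = call <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float> %lhs, <1 x float> %rhs)
    %res.old = extractelement <1 x i32> %cmp.old, i32 0

    ; after: f32 operands passed straight through
    %cmp.new = call <1 x i32> @llvm.aarch64.neon.fceq.v1i32.f32.f32(float %a, float %b)
    %res.new = extractelement <1 x i32> %cmp.new, i32 0

Both forms select the same fcmeq instruction; the scalar-typed variant simply drops the insertelement scaffolding.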
@@ -282,35 +282,50 @@ def int_aarch64_neon_vrsqrts :
   Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
             [IntrNoMem]>;
 
-class Neon_Cmp_Intrinsic
-  : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_anyvector_ty],
-              [IntrNoMem]>;
+// Compare with vector operands.
+class Neon_Cmp_Intrinsic :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_anyvector_ty],
+            [IntrNoMem]>;
+
+// Floating-point compare with scalar operands.
+class Neon_Float_Cmp_Intrinsic :
+  Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty, llvm_anyfloat_ty],
+            [IntrNoMem]>;
 
 // Scalar Compare Equal
 def int_aarch64_neon_vceq : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fceq : Neon_Float_Cmp_Intrinsic;
 
 // Scalar Compare Greater-Than or Equal
 def int_aarch64_neon_vcge : Neon_Cmp_Intrinsic;
 def int_aarch64_neon_vchs : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fcge : Neon_Float_Cmp_Intrinsic;
+def int_aarch64_neon_fchs : Neon_Float_Cmp_Intrinsic;
 
 // Scalar Compare Less-Than or Equal
 def int_aarch64_neon_vclez : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fclez : Neon_Float_Cmp_Intrinsic;
 
 // Scalar Compare Less-Than
 def int_aarch64_neon_vcltz : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fcltz : Neon_Float_Cmp_Intrinsic;
 
 // Scalar Compare Greater-Than
 def int_aarch64_neon_vcgt : Neon_Cmp_Intrinsic;
 def int_aarch64_neon_vchi : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fcgt : Neon_Float_Cmp_Intrinsic;
+def int_aarch64_neon_fchi : Neon_Float_Cmp_Intrinsic;
 
 // Scalar Compare Bitwise Test Bits
 def int_aarch64_neon_vtstd : Neon_Cmp_Intrinsic;
 
 // Scalar Floating-point Absolute Compare Greater Than Or Equal
 def int_aarch64_neon_vcage : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fcage : Neon_Float_Cmp_Intrinsic;
 
 // Scalar Floating-point Absolute Compare Greater Than
 def int_aarch64_neon_vcagt : Neon_Cmp_Intrinsic;
+def int_aarch64_neon_fcagt : Neon_Float_Cmp_Intrinsic;
 
 // Scalar Signed Saturating Accumulated of Unsigned Value
 def int_aarch64_neon_vuqadd : Neon_2Arg_Intrinsic;
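
A note on the class added above: Neon_Float_Cmp_Intrinsic keeps llvm_anyvector_ty for the result but switches the two operands to llvm_anyfloat_ty, so each new intrinsic is overloaded on the result type and on both operand types, and the mangled names carry all three. A sketch of the concrete declarations this produces, matching the declares in the updated test file further down:

    declare <1 x i32> @llvm.aarch64.neon.fceq.v1i32.f32.f32(float, float)
    declare <1 x i64> @llvm.aarch64.neon.fceq.v1i64.f64.f32(double, float)
    declare <1 x i64> @llvm.aarch64.neon.fceq.v1i64.f64.f64(double, double)

The mixed f64/f32 form is the one used by the compare-against-zero tests, where the zero immediate is passed as a float.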
@@ -4201,9 +4201,9 @@ multiclass Neon_Scalar3Same_SD_size_patterns<SDPatternOperator opnode,
 multiclass Neon_Scalar3Same_cmp_SD_size_patterns<SDPatternOperator opnode,
                                                  Instruction INSTS,
                                                  Instruction INSTD> {
-  def : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn), (v1f32 FPR32:$Rm))),
+  def : Pat<(v1i32 (opnode (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
             (INSTS FPR32:$Rn, FPR32:$Rm)>;
-  def : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
+  def : Pat<(v1i64 (opnode (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
             (INSTD FPR64:$Rn, FPR64:$Rm)>;
 }
@@ -4396,11 +4396,9 @@ class Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<CondCode CC,
 multiclass Neon_Scalar2SameMisc_cmpz_SD_size_patterns<SDPatternOperator opnode,
                                                       Instruction INSTS,
                                                       Instruction INSTD> {
-  def : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn),
-                           (v1f32 (scalar_to_vector (f32 fpz32:$FPImm))))),
+  def : Pat<(v1i32 (opnode (f32 FPR32:$Rn), (f32 fpz32:$FPImm))),
             (INSTS FPR32:$Rn, fpz32:$FPImm)>;
-  def : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn),
-                           (v1f32 (scalar_to_vector (f32 fpz32:$FPImm))))),
+  def : Pat<(v1i64 (opnode (f64 FPR64:$Rn), (f32 fpz32:$FPImm))),
             (INSTD FPR64:$Rn, fpz32:$FPImm)>;
 }
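
With the two multiclasses above now matching f32/f64 operands (and, for the cmpz variant, the fpz32 zero immediate directly rather than a scalar_to_vector of it), each scalar compare instruction is hooked up to its new intrinsic by a plain instantiation; for example, the FCMEQ block in the next hunk reads:

    defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_fceq,
                                                 FCMEQsss, FCMEQddd>;
    defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fceq,
                                                      FCMEQZssi, FCMEQZddi>;

The same substitution repeats for fcge, fcgt, fclez, fcltz, fcage and fcagt.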
@@ -5163,58 +5161,62 @@ def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETLT, CMLTddi>;
 
 // Scalar Floating-point Compare Mask Equal
 defm FCMEQ: NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11100, "fcmeq">;
-defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vceq,
+defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_fceq,
                                              FCMEQsss, FCMEQddd>;
 def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETEQ, FCMEQddd>;
 
 // Scalar Floating-point Compare Mask Equal To Zero
 defm FCMEQZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01101, "fcmeq">;
-defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vceq,
+defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fceq,
                                                   FCMEQZssi, FCMEQZddi>;
 def : Pat<(v1i64 (Neon_cmpz (v1f64 FPR64:$Rn), (f32 fpz32:$FPImm), SETEQ)),
           (FCMEQZddi FPR64:$Rn, fpz32:$FPImm)>;
 
 // Scalar Floating-point Compare Mask Greater Than Or Equal
 defm FCMGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11100, "fcmge">;
-defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcge,
+defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_fcge,
                                              FCMGEsss, FCMGEddd>;
 def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETGE, FCMGEddd>;
 
 // Scalar Floating-point Compare Mask Greater Than Or Equal To Zero
 defm FCMGEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01100, "fcmge">;
-defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vcge,
+defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fcge,
                                                   FCMGEZssi, FCMGEZddi>;
 
 // Scalar Floating-point Compare Mask Greather Than
 defm FCMGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11100, "fcmgt">;
-defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcgt,
+defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_fcgt,
                                              FCMGTsss, FCMGTddd>;
 def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETGT, FCMGTddd>;
 
 // Scalar Floating-point Compare Mask Greather Than Zero
 defm FCMGTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01100, "fcmgt">;
-defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vcgt,
+defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fcgt,
                                                   FCMGTZssi, FCMGTZddi>;
 
 // Scalar Floating-point Compare Mask Less Than Or Equal To Zero
 defm FCMLEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01101, "fcmle">;
-defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vclez,
+defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fclez,
                                                   FCMLEZssi, FCMLEZddi>;
 
 // Scalar Floating-point Compare Mask Less Than Zero
 defm FCMLTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01110, "fcmlt">;
-defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vcltz,
+defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fcltz,
                                                   FCMLTZssi, FCMLTZddi>;
 
 // Scalar Floating-point Absolute Compare Mask Greater Than Or Equal
 defm FACGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11101, "facge">;
-defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcage,
+defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_fcage,
                                              FACGEsss, FACGEddd>;
+def : Pat<(v1i64 (int_aarch64_neon_vcage (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
+          (FACGEddd FPR64:$Rn, FPR64:$Rm)>;
 
 // Scalar Floating-point Absolute Compare Mask Greater Than
 defm FACGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11101, "facgt">;
-defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcagt,
+defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_fcagt,
                                              FACGTsss, FACGTddd>;
+def : Pat<(v1i64 (int_aarch64_neon_vcagt (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
+          (FACGTddd FPR64:$Rn, FPR64:$Rm)>;
 
 // Scakar Floating-point Absolute Difference
 defm FABD: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11010, "fabd">;
@@ -3,29 +3,25 @@
 ;; Scalar Floating-point Compare
 
 define i32 @test_vceqs_f32(float %a, float %b) {
-; CHECK: test_vceqs_f32
+; CHECK-LABEL: test_vceqs_f32
 ; CHECK: fcmeq {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
 entry:
-  %vceq.i = insertelement <1 x float> undef, float %a, i32 0
-  %vceq1.i = insertelement <1 x float> undef, float %b, i32 0
-  %vceq2.i = call <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float> %vceq.i, <1 x float> %vceq1.i)
-  %0 = extractelement <1 x i32> %vceq2.i, i32 0
+  %fceq2.i = call <1 x i32> @llvm.aarch64.neon.fceq.v1i32.f32.f32(float %a, float %b)
+  %0 = extractelement <1 x i32> %fceq2.i, i32 0
   ret i32 %0
 }
 
 define i64 @test_vceqd_f64(double %a, double %b) {
-; CHECK: test_vceqd_f64
+; CHECK-LABEL: test_vceqd_f64
 ; CHECK: fcmeq {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
 entry:
-  %vceq.i = insertelement <1 x double> undef, double %a, i32 0
-  %vceq1.i = insertelement <1 x double> undef, double %b, i32 0
-  %vceq2.i = call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f64(<1 x double> %vceq.i, <1 x double> %vceq1.i)
-  %0 = extractelement <1 x i64> %vceq2.i, i32 0
+  %fceq2.i = call <1 x i64> @llvm.aarch64.neon.fceq.v1i64.f64.f64(double %a, double %b)
+  %0 = extractelement <1 x i64> %fceq2.i, i32 0
   ret i64 %0
 }
 
-define <1 x i64> @test_vceqz_f64(<1 x double> %a) #0 {
-; CHECK: test_vceqz_f64
+define <1 x i64> @test_vceqz_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vceqz_f64
 ; CHECK: fcmeq {{d[0-9]+}}, {{d[0-9]+}}, #0.0
 entry:
   %0 = fcmp oeq <1 x double> %a, zeroinitializer
@@ -34,295 +30,253 @@ entry:
 }
 
 define i32 @test_vceqzs_f32(float %a) {
-; CHECK: test_vceqzs_f32
+; CHECK-LABEL: test_vceqzs_f32
 ; CHECK: fcmeq {{s[0-9]}}, {{s[0-9]}}, #0.0
 entry:
-  %vceq.i = insertelement <1 x float> undef, float %a, i32 0
-  %vceq1.i = call <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float> %vceq.i, <1 x float> zeroinitializer)
-  %0 = extractelement <1 x i32> %vceq1.i, i32 0
+  %fceq1.i = call <1 x i32> @llvm.aarch64.neon.fceq.v1i32.f32.f32(float %a, float 0.0)
+  %0 = extractelement <1 x i32> %fceq1.i, i32 0
   ret i32 %0
 }
 
 define i64 @test_vceqzd_f64(double %a) {
-; CHECK: test_vceqzd_f64
+; CHECK-LABEL: test_vceqzd_f64
 ; CHECK: fcmeq {{d[0-9]}}, {{d[0-9]}}, #0.0
 entry:
-  %vceq.i = insertelement <1 x double> undef, double %a, i32 0
-  %vceq1.i = tail call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f32(<1 x double> %vceq.i, <1 x float> zeroinitializer) #5
-  %0 = extractelement <1 x i64> %vceq1.i, i32 0
+  %fceq1.i = call <1 x i64> @llvm.aarch64.neon.fceq.v1i64.f64.f32(double %a, float 0.0)
+  %0 = extractelement <1 x i64> %fceq1.i, i32 0
   ret i64 %0
 }
 
 define i32 @test_vcges_f32(float %a, float %b) {
-; CHECK: test_vcges_f32
+; CHECK-LABEL: test_vcges_f32
 ; CHECK: fcmge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
 entry:
-  %vcge.i = insertelement <1 x float> undef, float %a, i32 0
-  %vcge1.i = insertelement <1 x float> undef, float %b, i32 0
-  %vcge2.i = call <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float> %vcge.i, <1 x float> %vcge1.i)
-  %0 = extractelement <1 x i32> %vcge2.i, i32 0
+  %fcge2.i = call <1 x i32> @llvm.aarch64.neon.fcge.v1i32.f32.f32(float %a, float %b)
+  %0 = extractelement <1 x i32> %fcge2.i, i32 0
   ret i32 %0
 }
 
 define i64 @test_vcged_f64(double %a, double %b) {
-; CHECK: test_vcged_f64
+; CHECK-LABEL: test_vcged_f64
 ; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
 entry:
-  %vcge.i = insertelement <1 x double> undef, double %a, i32 0
-  %vcge1.i = insertelement <1 x double> undef, double %b, i32 0
-  %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double> %vcge.i, <1 x double> %vcge1.i)
-  %0 = extractelement <1 x i64> %vcge2.i, i32 0
+  %fcge2.i = call <1 x i64> @llvm.aarch64.neon.fcge.v1i64.f64.f64(double %a, double %b)
+  %0 = extractelement <1 x i64> %fcge2.i, i32 0
   ret i64 %0
 }
 
 define i32 @test_vcgezs_f32(float %a) {
-; CHECK: test_vcgezs_f32
+; CHECK-LABEL: test_vcgezs_f32
 ; CHECK: fcmge {{s[0-9]}}, {{s[0-9]}}, #0.0
 entry:
-  %vcge.i = insertelement <1 x float> undef, float %a, i32 0
-  %vcge1.i = call <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float> %vcge.i, <1 x float> zeroinitializer)
-  %0 = extractelement <1 x i32> %vcge1.i, i32 0
+  %fcge1.i = call <1 x i32> @llvm.aarch64.neon.fcge.v1i32.f32.f32(float %a, float 0.0)
+  %0 = extractelement <1 x i32> %fcge1.i, i32 0
   ret i32 %0
 }
 
 define i64 @test_vcgezd_f64(double %a) {
-; CHECK: test_vcgezd_f64
+; CHECK-LABEL: test_vcgezd_f64
 ; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, #0.0
 entry:
-  %vcge.i = insertelement <1 x double> undef, double %a, i32 0
-  %vcge1.i = tail call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f32(<1 x double> %vcge.i, <1 x float> zeroinitializer) #5
-  %0 = extractelement <1 x i64> %vcge1.i, i32 0
+  %fcge1.i = call <1 x i64> @llvm.aarch64.neon.fcge.v1i64.f64.f32(double %a, float 0.0)
+  %0 = extractelement <1 x i64> %fcge1.i, i32 0
   ret i64 %0
 }
 
 define i32 @test_vcgts_f32(float %a, float %b) {
-; CHECK: test_vcgts_f32
+; CHECK-LABEL: test_vcgts_f32
 ; CHECK: fcmgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
 entry:
-  %vcgt.i = insertelement <1 x float> undef, float %a, i32 0
-  %vcgt1.i = insertelement <1 x float> undef, float %b, i32 0
-  %vcgt2.i = call <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float> %vcgt.i, <1 x float> %vcgt1.i)
-  %0 = extractelement <1 x i32> %vcgt2.i, i32 0
+  %fcgt2.i = call <1 x i32> @llvm.aarch64.neon.fcgt.v1i32.f32.f32(float %a, float %b)
+  %0 = extractelement <1 x i32> %fcgt2.i, i32 0
   ret i32 %0
 }
 
 define i64 @test_vcgtd_f64(double %a, double %b) {
-; CHECK: test_vcgtd_f64
+; CHECK-LABEL: test_vcgtd_f64
 ; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
 entry:
-  %vcgt.i = insertelement <1 x double> undef, double %a, i32 0
-  %vcgt1.i = insertelement <1 x double> undef, double %b, i32 0
-  %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double> %vcgt.i, <1 x double> %vcgt1.i)
-  %0 = extractelement <1 x i64> %vcgt2.i, i32 0
+  %fcgt2.i = call <1 x i64> @llvm.aarch64.neon.fcgt.v1i64.f64.f64(double %a, double %b)
+  %0 = extractelement <1 x i64> %fcgt2.i, i32 0
   ret i64 %0
 }
 
 define i32 @test_vcgtzs_f32(float %a) {
-; CHECK: test_vcgtzs_f32
+; CHECK-LABEL: test_vcgtzs_f32
 ; CHECK: fcmgt {{s[0-9]}}, {{s[0-9]}}, #0.0
 entry:
-  %vcgt.i = insertelement <1 x float> undef, float %a, i32 0
-  %vcgt1.i = call <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float> %vcgt.i, <1 x float> zeroinitializer)
-  %0 = extractelement <1 x i32> %vcgt1.i, i32 0
+  %fcgt1.i = call <1 x i32> @llvm.aarch64.neon.fcgt.v1i32.f32.f32(float %a, float 0.0)
+  %0 = extractelement <1 x i32> %fcgt1.i, i32 0
   ret i32 %0
 }
 
 define i64 @test_vcgtzd_f64(double %a) {
-; CHECK: test_vcgtzd_f64
+; CHECK-LABEL: test_vcgtzd_f64
 ; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, #0.0
 entry:
-  %vcgt.i = insertelement <1 x double> undef, double %a, i32 0
-  %vcgt1.i = tail call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f32(<1 x double> %vcgt.i, <1 x float> zeroinitializer) #5
-  %0 = extractelement <1 x i64> %vcgt1.i, i32 0
+  %fcgt1.i = call <1 x i64> @llvm.aarch64.neon.fcgt.v1i64.f64.f32(double %a, float 0.0)
+  %0 = extractelement <1 x i64> %fcgt1.i, i32 0
   ret i64 %0
 }
 
 define i32 @test_vcles_f32(float %a, float %b) {
-; CHECK: test_vcles_f32
+; CHECK-LABEL: test_vcles_f32
 ; CHECK: fcmge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
 entry:
-  %vcge.i = insertelement <1 x float> undef, float %a, i32 0
-  %vcge1.i = insertelement <1 x float> undef, float %b, i32 0
-  %vcge2.i = call <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float> %vcge.i, <1 x float> %vcge1.i)
-  %0 = extractelement <1 x i32> %vcge2.i, i32 0
+  %fcge2.i = call <1 x i32> @llvm.aarch64.neon.fcge.v1i32.f32.f32(float %a, float %b)
+  %0 = extractelement <1 x i32> %fcge2.i, i32 0
   ret i32 %0
 }
 
 define i64 @test_vcled_f64(double %a, double %b) {
-; CHECK: test_vcled_f64
+; CHECK-LABEL: test_vcled_f64
 ; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
 entry:
-  %vcge.i = insertelement <1 x double> undef, double %a, i32 0
-  %vcge1.i = insertelement <1 x double> undef, double %b, i32 0
-  %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double> %vcge.i, <1 x double> %vcge1.i)
-  %0 = extractelement <1 x i64> %vcge2.i, i32 0
+  %fcge2.i = call <1 x i64> @llvm.aarch64.neon.fcge.v1i64.f64.f64(double %a, double %b)
+  %0 = extractelement <1 x i64> %fcge2.i, i32 0
   ret i64 %0
 }
 
 define i32 @test_vclezs_f32(float %a) {
-; CHECK: test_vclezs_f32
+; CHECK-LABEL: test_vclezs_f32
 ; CHECK: fcmle {{s[0-9]}}, {{s[0-9]}}, #0.0
 entry:
-  %vcle.i = insertelement <1 x float> undef, float %a, i32 0
-  %vcle1.i = call <1 x i32> @llvm.aarch64.neon.vclez.v1i32.v1f32.v1f32(<1 x float> %vcle.i, <1 x float> zeroinitializer)
-  %0 = extractelement <1 x i32> %vcle1.i, i32 0
+  %fcle1.i = call <1 x i32> @llvm.aarch64.neon.fclez.v1i32.f32.f32(float %a, float 0.0)
+  %0 = extractelement <1 x i32> %fcle1.i, i32 0
   ret i32 %0
 }
 
 define i64 @test_vclezd_f64(double %a) {
-; CHECK: test_vclezd_f64
+; CHECK-LABEL: test_vclezd_f64
 ; CHECK: fcmle {{d[0-9]}}, {{d[0-9]}}, #0.0
 entry:
-  %vcle.i = insertelement <1 x double> undef, double %a, i32 0
-  %vcle1.i = tail call <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1f64.v1f32(<1 x double> %vcle.i, <1 x float> zeroinitializer) #5
-  %0 = extractelement <1 x i64> %vcle1.i, i32 0
+  %fcle1.i = call <1 x i64> @llvm.aarch64.neon.fclez.v1i64.f64.f32(double %a, float 0.0)
+  %0 = extractelement <1 x i64> %fcle1.i, i32 0
   ret i64 %0
 }
 
 define i32 @test_vclts_f32(float %a, float %b) {
-; CHECK: test_vclts_f32
+; CHECK-LABEL: test_vclts_f32
 ; CHECK: fcmgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
 entry:
-  %vcgt.i = insertelement <1 x float> undef, float %b, i32 0
-  %vcgt1.i = insertelement <1 x float> undef, float %a, i32 0
-  %vcgt2.i = call <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float> %vcgt.i, <1 x float> %vcgt1.i)
-  %0 = extractelement <1 x i32> %vcgt2.i, i32 0
+  %fcgt2.i = call <1 x i32> @llvm.aarch64.neon.fcgt.v1i32.f32.f32(float %a, float %b)
+  %0 = extractelement <1 x i32> %fcgt2.i, i32 0
   ret i32 %0
 }
 
 define i64 @test_vcltd_f64(double %a, double %b) {
-; CHECK: test_vcltd_f64
+; CHECK-LABEL: test_vcltd_f64
 ; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
 entry:
-  %vcgt.i = insertelement <1 x double> undef, double %b, i32 0
-  %vcgt1.i = insertelement <1 x double> undef, double %a, i32 0
-  %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double> %vcgt.i, <1 x double> %vcgt1.i)
-  %0 = extractelement <1 x i64> %vcgt2.i, i32 0
+  %fcgt2.i = call <1 x i64> @llvm.aarch64.neon.fcgt.v1i64.f64.f64(double %a, double %b)
+  %0 = extractelement <1 x i64> %fcgt2.i, i32 0
   ret i64 %0
 }
 
 define i32 @test_vcltzs_f32(float %a) {
-; CHECK: test_vcltzs_f32
+; CHECK-LABEL: test_vcltzs_f32
 ; CHECK: fcmlt {{s[0-9]}}, {{s[0-9]}}, #0.0
 entry:
-  %vclt.i = insertelement <1 x float> undef, float %a, i32 0
-  %vclt1.i = call <1 x i32> @llvm.aarch64.neon.vcltz.v1i32.v1f32.v1f32(<1 x float> %vclt.i, <1 x float> zeroinitializer)
-  %0 = extractelement <1 x i32> %vclt1.i, i32 0
+  %fclt1.i = call <1 x i32> @llvm.aarch64.neon.fcltz.v1i32.f32.f32(float %a, float 0.0)
+  %0 = extractelement <1 x i32> %fclt1.i, i32 0
   ret i32 %0
 }
 
 define i64 @test_vcltzd_f64(double %a) {
-; CHECK: test_vcltzd_f64
+; CHECK-LABEL: test_vcltzd_f64
 ; CHECK: fcmlt {{d[0-9]}}, {{d[0-9]}}, #0.0
 entry:
-  %vclt.i = insertelement <1 x double> undef, double %a, i32 0
-  %vclt1.i = tail call <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1f64.v1f32(<1 x double> %vclt.i, <1 x float> zeroinitializer) #5
-  %0 = extractelement <1 x i64> %vclt1.i, i32 0
+  %fclt1.i = call <1 x i64> @llvm.aarch64.neon.fcltz.v1i64.f64.f32(double %a, float 0.0)
+  %0 = extractelement <1 x i64> %fclt1.i, i32 0
   ret i64 %0
 }
 
 define i32 @test_vcages_f32(float %a, float %b) {
-; CHECK: test_vcages_f32
+; CHECK-LABEL: test_vcages_f32
 ; CHECK: facge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
 entry:
-  %vcage.i = insertelement <1 x float> undef, float %a, i32 0
-  %vcage1.i = insertelement <1 x float> undef, float %b, i32 0
-  %vcage2.i = call <1 x i32> @llvm.aarch64.neon.vcage.v1i32.v1f32.v1f32(<1 x float> %vcage.i, <1 x float> %vcage1.i)
-  %0 = extractelement <1 x i32> %vcage2.i, i32 0
+  %fcage2.i = call <1 x i32> @llvm.aarch64.neon.fcage.v1i32.f32.f32(float %a, float %b)
+  %0 = extractelement <1 x i32> %fcage2.i, i32 0
   ret i32 %0
 }
 
 define i64 @test_vcaged_f64(double %a, double %b) {
-; CHECK: test_vcaged_f64
+; CHECK-LABEL: test_vcaged_f64
 ; CHECK: facge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
 entry:
-  %vcage.i = insertelement <1 x double> undef, double %a, i32 0
-  %vcage1.i = insertelement <1 x double> undef, double %b, i32 0
-  %vcage2.i = call <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double> %vcage.i, <1 x double> %vcage1.i)
-  %0 = extractelement <1 x i64> %vcage2.i, i32 0
+  %fcage2.i = call <1 x i64> @llvm.aarch64.neon.fcage.v1i64.f64.f64(double %a, double %b)
+  %0 = extractelement <1 x i64> %fcage2.i, i32 0
   ret i64 %0
 }
 
 define i32 @test_vcagts_f32(float %a, float %b) {
-; CHECK: test_vcagts_f32
+; CHECK-LABEL: test_vcagts_f32
 ; CHECK: facgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
 entry:
-  %vcagt.i = insertelement <1 x float> undef, float %a, i32 0
-  %vcagt1.i = insertelement <1 x float> undef, float %b, i32 0
-  %vcagt2.i = call <1 x i32> @llvm.aarch64.neon.vcagt.v1i32.v1f32.v1f32(<1 x float> %vcagt.i, <1 x float> %vcagt1.i)
-  %0 = extractelement <1 x i32> %vcagt2.i, i32 0
+  %fcagt2.i = call <1 x i32> @llvm.aarch64.neon.fcagt.v1i32.f32.f32(float %a, float %b)
+  %0 = extractelement <1 x i32> %fcagt2.i, i32 0
   ret i32 %0
 }
 
 define i64 @test_vcagtd_f64(double %a, double %b) {
-; CHECK: test_vcagtd_f64
+; CHECK-LABEL: test_vcagtd_f64
 ; CHECK: facgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
 entry:
-  %vcagt.i = insertelement <1 x double> undef, double %a, i32 0
-  %vcagt1.i = insertelement <1 x double> undef, double %b, i32 0
-  %vcagt2.i = call <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double> %vcagt.i, <1 x double> %vcagt1.i)
-  %0 = extractelement <1 x i64> %vcagt2.i, i32 0
+  %fcagt2.i = call <1 x i64> @llvm.aarch64.neon.fcagt.v1i64.f64.f64(double %a, double %b)
+  %0 = extractelement <1 x i64> %fcagt2.i, i32 0
   ret i64 %0
 }
 
 define i32 @test_vcales_f32(float %a, float %b) {
-; CHECK: test_vcales_f32
+; CHECK-LABEL: test_vcales_f32
 ; CHECK: facge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
 entry:
-  %vcage.i = insertelement <1 x float> undef, float %b, i32 0
-  %vcage1.i = insertelement <1 x float> undef, float %a, i32 0
-  %vcage2.i = call <1 x i32> @llvm.aarch64.neon.vcage.v1i32.v1f32.v1f32(<1 x float> %vcage.i, <1 x float> %vcage1.i)
-  %0 = extractelement <1 x i32> %vcage2.i, i32 0
+  %fcage2.i = call <1 x i32> @llvm.aarch64.neon.fcage.v1i32.f32.f32(float %a, float %b)
+  %0 = extractelement <1 x i32> %fcage2.i, i32 0
   ret i32 %0
 }
 
 define i64 @test_vcaled_f64(double %a, double %b) {
-; CHECK: test_vcaled_f64
+; CHECK-LABEL: test_vcaled_f64
 ; CHECK: facge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
 entry:
-  %vcage.i = insertelement <1 x double> undef, double %b, i32 0
-  %vcage1.i = insertelement <1 x double> undef, double %a, i32 0
-  %vcage2.i = call <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double> %vcage.i, <1 x double> %vcage1.i)
-  %0 = extractelement <1 x i64> %vcage2.i, i32 0
+  %fcage2.i = call <1 x i64> @llvm.aarch64.neon.fcage.v1i64.f64.f64(double %a, double %b)
+  %0 = extractelement <1 x i64> %fcage2.i, i32 0
   ret i64 %0
 }
 
 define i32 @test_vcalts_f32(float %a, float %b) {
-; CHECK: test_vcalts_f32
+; CHECK-LABEL: test_vcalts_f32
 ; CHECK: facgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
 entry:
-  %vcalt.i = insertelement <1 x float> undef, float %b, i32 0
-  %vcalt1.i = insertelement <1 x float> undef, float %a, i32 0
-  %vcalt2.i = call <1 x i32> @llvm.aarch64.neon.vcagt.v1i32.v1f32.v1f32(<1 x float> %vcalt.i, <1 x float> %vcalt1.i)
-  %0 = extractelement <1 x i32> %vcalt2.i, i32 0
+  %fcalt2.i = call <1 x i32> @llvm.aarch64.neon.fcagt.v1i32.f32.f32(float %a, float %b)
+  %0 = extractelement <1 x i32> %fcalt2.i, i32 0
   ret i32 %0
 }
 
 define i64 @test_vcaltd_f64(double %a, double %b) {
-; CHECK: test_vcaltd_f64
+; CHECK-LABEL: test_vcaltd_f64
 ; CHECK: facgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
 entry:
-  %vcalt.i = insertelement <1 x double> undef, double %b, i32 0
-  %vcalt1.i = insertelement <1 x double> undef, double %a, i32 0
-  %vcalt2.i = call <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double> %vcalt.i, <1 x double> %vcalt1.i)
-  %0 = extractelement <1 x i64> %vcalt2.i, i32 0
+  %fcalt2.i = call <1 x i64> @llvm.aarch64.neon.fcagt.v1i64.f64.f64(double %a, double %b)
+  %0 = extractelement <1 x i64> %fcalt2.i, i32 0
   ret i64 %0
 }
 
-declare <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
-declare <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
-declare <1 x i32> @llvm.aarch64.neon.vclez.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
-declare <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
-declare <1 x i32> @llvm.aarch64.neon.vcltz.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
-declare <1 x i32> @llvm.aarch64.neon.vcage.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
-declare <1 x i32> @llvm.aarch64.neon.vcagt.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
+declare <1 x i32> @llvm.aarch64.neon.fceq.v1i32.f32.f32(float, float)
+declare <1 x i64> @llvm.aarch64.neon.fceq.v1i64.f64.f32(double, float)
+declare <1 x i64> @llvm.aarch64.neon.fceq.v1i64.f64.f64(double, double)
+declare <1 x i32> @llvm.aarch64.neon.fcge.v1i32.f32.f32(float, float)
+declare <1 x i64> @llvm.aarch64.neon.fcge.v1i64.f64.f32(double, float)
+declare <1 x i64> @llvm.aarch64.neon.fcge.v1i64.f64.f64(double, double)
+declare <1 x i32> @llvm.aarch64.neon.fclez.v1i32.f32.f32(float, float)
+declare <1 x i64> @llvm.aarch64.neon.fclez.v1i64.f64.f32(double, float)
+declare <1 x i32> @llvm.aarch64.neon.fcgt.v1i32.f32.f32(float, float)
+declare <1 x i64> @llvm.aarch64.neon.fcgt.v1i64.f64.f32(double, float)
+declare <1 x i64> @llvm.aarch64.neon.fcgt.v1i64.f64.f64(double, double)
+declare <1 x i32> @llvm.aarch64.neon.fcltz.v1i32.f32.f32(float, float)
+declare <1 x i64> @llvm.aarch64.neon.fcltz.v1i64.f64.f32(double, float)
+declare <1 x i32> @llvm.aarch64.neon.fcage.v1i32.f32.f32(float, float)
+declare <1 x i64> @llvm.aarch64.neon.fcage.v1i64.f64.f64(double, double)
+declare <1 x i32> @llvm.aarch64.neon.fcagt.v1i32.f32.f32(float, float)
+declare <1 x i64> @llvm.aarch64.neon.fcagt.v1i64.f64.f64(double, double)
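
Beyond swapping the intrinsics, the tests also move their function-name checks from plain CHECK to CHECK-LABEL, so FileCheck re-anchors its search at each function and a failure in one test cannot be satisfied by output belonging to a neighbouring one. Each test now follows the same two-line pattern, e.g.:

    ; CHECK-LABEL: test_vceqs_f32
    ; CHECK: fcmeq {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}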