; llvm-project/llvm/test/CodeGen/Thumb2/mve-vcmpfr.ll
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-MVE
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-MVEFP
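; The CHECK-MVE configuration (+mve,+fullfp16) has no vector float compares,
; so each fcmp lane is scalarized through VFP compares; the CHECK-MVEFP
; configuration (+mve.fp) can compare a whole q register against a scalar in
; a GPR (vcmp.f32 <cond>, qN, rN) and select with a predicated vpsel.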
define arm_aapcs_vfpcc <4 x float> @vcmp_oeq_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_oeq_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s4
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, s4
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s4
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_oeq_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 eq, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp oeq <4 x float> %src, %sp
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
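; The scalarized CHECK-MVE pattern above repeats once per lane: vcmp.f32
; compares a lane of q0 against the scalar in s4, vmrs copies the FP flags
; into APSR, it/mov plus cset materialize the condition as 0 or 1 in a GPR,
; and lsls #31 recreates the flag so that vseleq picks the %b lane (q3) when
; the bit was 0 and the %a lane (q2) when it was 1.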
define arm_aapcs_vfpcc <4 x float> @vcmp_one_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_one_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s4
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vcmp.f32 s0, s4
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s4
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r2, #1
; CHECK-MVE-NEXT: vcmp.f32 s2, s4
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r3, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s4
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r0, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_one_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vpt.f32 ge, q0, r0
; CHECK-MVEFP-NEXT: vcmpt.f32 le, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q3, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp one <4 x float> %src, %sp
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
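; fcmp one (ordered not-equal) has no single condition code, so the scalar
; path tests both mi (olt) and gt (ogt) against the same flags, while the
; MVEFP path chains two compares in a VPT block and swaps the vpsel operands
; relative to the oeq case to invert the predicate's sense.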
define arm_aapcs_vfpcc <4 x float> @vcmp_ogt_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ogt_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s4
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, s4
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s4
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ogt_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 gt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp ogt <4 x float> %src, %sp
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_oge_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_oge_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s4
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, s4
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s4
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_oge_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 ge, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp oge <4 x float> %src, %sp
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_olt_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_olt_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s4
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, s4
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s4
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_olt_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 lt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp olt <4 x float> %src, %sp
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_ole_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ole_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s4
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, s4
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s4
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ole_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 le, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp ole <4 x float> %src, %sp
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
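; After vmrs, the ordered compares map directly onto ARM condition codes:
; eq -> oeq, gt -> ogt, ge -> oge, mi -> olt and ls -> ole.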
define arm_aapcs_vfpcc <4 x float> @vcmp_ueq_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ueq_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s4
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vcmp.f32 s0, s4
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s4
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r2, #1
; CHECK-MVE-NEXT: vcmp.f32 s2, s4
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r3, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s4
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r0, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ueq_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vpt.f32 ge, q0, r0
; CHECK-MVEFP-NEXT: vcmpt.f32 le, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp ueq <4 x float> %src, %sp
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
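; fcmp ueq (unordered or equal) is the complement of one: the scalar path
; accepts eq or vs (the V flag is set by an unordered compare), and the
; MVEFP path reuses the VPT pair from the one case with the vpsel operands
; in the opposite order.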
define arm_aapcs_vfpcc <4 x float> @vcmp_une_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_une_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s4
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, s4
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s4
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_une_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 ne, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp une <4 x float> %src, %sp
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
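; fcmp une needs no extra unordered handling: both the scalar ne condition
; and the MVE vcmp.f32 ne condition are already true when an operand is NaN.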
define arm_aapcs_vfpcc <4 x float> @vcmp_ugt_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ugt_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s4
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, s4
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s4
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ugt_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 le, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q3, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp ugt <4 x float> %src, %sp
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
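; MVE has no unordered vector conditions, so the MVEFP path for ugt (and uge
; below) tests the inverse ordered compare and swaps the vpsel operands; the
; scalar path instead uses hi, which is true for greater-than or unordered.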
define arm_aapcs_vfpcc <4 x float> @vcmp_uge_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_uge_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s4
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, s4
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s4
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_uge_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 ge, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp uge <4 x float> %src, %sp
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
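; fcmp ult against a splatted scalar. The scalarized CHECK-MVE path compares each
; lane with vcmp.f32/vmrs and tests the LT condition (N != V), which after a VFP
; compare is true for less-than and for unordered inputs; CHECK-MVEFP keeps the
; scalar in r0 and emits a single vector-by-scalar vcmp.f32 plus a vpsel between
; %a (q2) and %b (q3).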
define arm_aapcs_vfpcc <4 x float> @vcmp_ult_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ult_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s4
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, s4
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s4
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ult_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 lt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp ult <4 x float> %src, %sp
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
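; fcmp ule: same scalarized pattern, predicated on LE (Z set or N != V), i.e.
; less-than, equal, or unordered.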
define arm_aapcs_vfpcc <4 x float> @vcmp_ule_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ule_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s4
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, s4
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s4
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ule_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 le, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp ule <4 x float> %src, %sp
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
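; fcmp ord maps to the VC condition (V clear) on the scalar path, since a VFP
; compare sets V only when an operand is a NaN. MVE's vector compare conditions
; have no direct ordered/unordered test, so the CHECK-MVEFP path builds the
; predicate from two predicated compares in a VPT block before the vpsel.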
define arm_aapcs_vfpcc <4 x float> @vcmp_ord_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ord_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s4
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, s4
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s4
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ord_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vpt.f32 ge, q0, r0
; CHECK-MVEFP-NEXT: vcmpt.f32 lt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q3, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp ord <4 x float> %src, %sp
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
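; fcmp uno is the complement: the scalar path tests VS (V set), and the MVEFP
; path reuses the same ge/lt VPT block as ord with the vpsel operands swapped.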
define arm_aapcs_vfpcc <4 x float> @vcmp_uno_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_uno_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s4
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, s4
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s4
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_uno_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vpt.f32 ge, q0, r0
; CHECK-MVEFP-NEXT: vcmpt.f32 lt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp uno <4 x float> %src, %sp
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
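; The <8 x half> variants repeat the scalarized pattern per f16 lane: vmovx.f16
; extracts the odd lane from the top half of each 32-bit s register, vseleq.f16
; selects between the corresponding lanes of %a and %b, and vins.f16 inserts the
; result back into the top half of the output lane in q4 (hence the d8/d9
; save/restore). CHECK-MVEFP still needs only one vcmp.f16 and a vpsel.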
define arm_aapcs_vfpcc <8 x half> @vcmp_oeq_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_oeq_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s0, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s2, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s3, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_oeq_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 eq, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp oeq <8 x half> %src, %sp
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
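; fcmp one: ordered and not equal. The scalar path ORs two conditions per lane
; (MI for less-than, GT for greater-than) via back-to-back IT blocks; the MVEFP
; path folds this into a ge/le VPT block with the vpsel operands in q3/q2 order.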
define arm_aapcs_vfpcc <8 x half> @vcmp_one_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_one_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s0, s4
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s1, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s2, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s3, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r0, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_one_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vpt.f16 ge, q0, r0
; CHECK-MVEFP-NEXT: vcmpt.f16 le, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q3, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp one <8 x half> %src, %sp
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
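; fcmp ogt uses the GT condition, which is true only for an ordered greater-than,
; so a single vector vcmp.f16 gt suffices on the MVEFP path.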
define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ogt_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s0, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s2, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s3, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ogt_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 gt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp ogt <8 x half> %src, %sp
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
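; fcmp oge: as above with GE (N == V), true only for ordered greater-or-equal.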
define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_oge_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s0, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s2, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s3, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_oge_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 ge, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp oge <8 x half> %src, %sp
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
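; fcmp olt tests MI (N set), which a VFP compare sets only for an ordered
; less-than.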
define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_olt_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s0, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s2, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s3, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_olt_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 lt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp olt <8 x half> %src, %sp
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
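; fcmp ole tests LS (C clear or Z set): true for ordered less-than or equal,
; false for greater-than and unordered results, both of which set C.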
define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ole_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s0, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s2, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s3, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ole_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 le, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp ole <8 x half> %src, %sp
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
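; fcmp ueq: equal or unordered. The scalar path ORs EQ and VS per lane through
; two IT blocks; the MVEFP path uses the same ge/le VPT block as "one" with the
; vpsel operands swapped.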
define arm_aapcs_vfpcc <8 x half> @vcmp_ueq_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ueq_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s0, s4
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s1, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s2, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s3, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r0, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ueq_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vpt.f16 ge, q0, r0
; CHECK-MVEFP-NEXT: vcmpt.f16 le, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp ueq <8 x half> %src, %sp
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_une_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_une_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s0, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s2, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s3, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_une_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 ne, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp une <8 x half> %src, %sp
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ugt_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s0, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s2, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s3, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ugt_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 gt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp ugt <8 x half> %src, %sp
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_uge_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s0, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s2, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s3, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_uge_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 ge, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp uge <8 x half> %src, %sp
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ult_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s0, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s2, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s3, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ult_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 lt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp ult <8 x half> %src, %sp
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ule_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s0, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s2, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s3, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ule_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 le, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp ule <8 x half> %src, %sp
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ord_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s0, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s2, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s3, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ord_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vpt.f16 ge, q0, r0
; CHECK-MVEFP-NEXT: vcmpt.f16 lt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q3, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp ord <8 x half> %src, %sp
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_uno_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s0, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s2, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s3, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_uno_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vpt.f16 ge, q0, r0
; CHECK-MVEFP-NEXT: vcmpt.f16 lt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp uno <8 x half> %src, %sp
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
; Reversed
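; The tests below repeat the comparisons above with the fcmp operands
; swapped, i.e. the splatted scalar is the first operand. Note in the
; CHECK-MVEFP lines that the backend re-reverses the condition (e.g. ogt
; becomes lt) so the vector operand stays on the left of the vcmp against
; the scalar register.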
define arm_aapcs_vfpcc <4 x float> @vcmp_r_oeq_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_oeq_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s4, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s2
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s3
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_oeq_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 eq, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp oeq <4 x float> %sp, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_r_one_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_one_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s4, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vcmp.f32 s4, s0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s1
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r2, #1
; CHECK-MVE-NEXT: vcmp.f32 s4, s2
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r3, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s3
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r0, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_one_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vpt.f32 le, q0, r0
; CHECK-MVEFP-NEXT: vcmpt.f32 ge, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q3, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp one <4 x float> %sp, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_r_ogt_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ogt_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s4, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s2
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s3
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ogt_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 lt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp ogt <4 x float> %sp, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_r_oge_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_oge_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s4, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s2
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s3
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_oge_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 le, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp oge <4 x float> %sp, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_r_olt_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_olt_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s4, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s2
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s3
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_olt_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 gt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp olt <4 x float> %sp, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_r_ole_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ole_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s4, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s2
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s3
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ole_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 ge, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp ole <4 x float> %sp, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_r_ueq_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ueq_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s4, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vcmp.f32 s4, s0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s1
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r2, #1
; CHECK-MVE-NEXT: vcmp.f32 s4, s2
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r3, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s3
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r0, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ueq_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vpt.f32 le, q0, r0
; CHECK-MVEFP-NEXT: vcmpt.f32 ge, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp ueq <4 x float> %sp, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_r_une_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_une_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s4, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s2
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s3
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_une_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 ne, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp une <4 x float> %sp, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_r_ugt_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ugt_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s4, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s2
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s3
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ugt_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 lt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp ugt <4 x float> %sp, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_r_uge_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_uge_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s4, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s2
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s3
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_uge_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 le, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp uge <4 x float> %sp, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_r_ult_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ult_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s4, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s2
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s3
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ult_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 gt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp ult <4 x float> %sp, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_r_ule_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ule_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s4, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s2
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s3
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ule_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vcmp.f32 ge, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp ule <4 x float> %sp, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_r_ord_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ord_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s4, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s2
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s3
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ord_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vpt.f32 le, q0, r0
; CHECK-MVEFP-NEXT: vcmpt.f32 gt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q3, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp ord <4 x float> %sp, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_r_uno_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_uno_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s4, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s2
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s4, s3
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_uno_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov r0, s4
; CHECK-MVEFP-NEXT: vpt.f32 le, q0, r0
; CHECK-MVEFP-NEXT: vcmpt.f32 gt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <4 x float> undef, float %src2, i32 0
%sp = shufflevector <4 x float> %i, <4 x float> undef, <4 x i32> zeroinitializer
%c = fcmp uno <4 x float> %sp, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_oeq_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_oeq_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s2
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s3
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_oeq_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 eq, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp oeq <8 x half> %sp, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
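; Note: "one" (ordered and not equal) has no single ARM condition code, so each
; scalar compare below sets its lane result on either of two conditions:
; "it mi" (less than) followed by "it gt" (greater than).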
define arm_aapcs_vfpcc <8 x half> @vcmp_r_one_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_one_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s4, s0
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s1
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s2
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s3
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r0, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_one_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vpt.f16 le, q0, r0
; CHECK-MVEFP-NEXT: vcmpt.f16 ge, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q3, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp one <8 x half> %sp, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_ogt_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ogt_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s2
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s3
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ogt_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 lt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp ogt <8 x half> %sp, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_oge_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_oge_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s2
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s3
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_oge_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 le, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp oge <8 x half> %sp, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_olt_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_olt_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s2
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s3
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_olt_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 gt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp olt <8 x half> %sp, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_ole_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ole_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s2
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s3
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ole_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 ge, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp ole <8 x half> %sp, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
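; Note: "ueq" (equal or unordered) is the dual case: each lane result is set on
; "it eq" or on "it vs" (unordered), again costing two IT blocks per compare.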
define arm_aapcs_vfpcc <8 x half> @vcmp_r_ueq_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ueq_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s4, s0
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s1
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s2
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s3
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r0, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ueq_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vpt.f16 le, q0, r0
; CHECK-MVEFP-NEXT: vcmpt.f16 ge, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp ueq <8 x half> %sp, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_une_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_une_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s2
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s3
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_une_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 ne, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp une <8 x half> %sp, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_ugt_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ugt_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s2
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s3
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ugt_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 lt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp ugt <8 x half> %sp, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_uge_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_uge_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s2
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s3
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_uge_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 le, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp uge <8 x half> %sp, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_ult_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ult_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s2
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s3
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ult_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 gt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp ult <8 x half> %sp, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_ule_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ule_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s2
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s3
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ule_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 ge, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp ule <8 x half> %sp, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
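; Note: "ord" and "uno" map directly onto the vc/vs flags after a scalar vcmp,
; so one IT block per lane suffices; the mve.fp forms still need the
; two-compare vpt sequence since the vector compare lacks an ordered condition.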
define arm_aapcs_vfpcc <8 x half> @vcmp_r_ord_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ord_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s2
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s3
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ord_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vpt.f16 le, q0, r0
; CHECK-MVEFP-NEXT: vcmpt.f16 gt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q3, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp ord <8 x half> %sp, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_uno_v8f16(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_uno_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s4, s0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s2
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s4, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s4, s3
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_uno_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vpt.f16 le, q0, r0
; CHECK-MVEFP-NEXT: vcmpt.f16 gt, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%i = insertelement <8 x half> undef, half %src2, i32 0
%sp = shufflevector <8 x half> %i, <8 x half> undef, <8 x i32> zeroinitializer
%c = fcmp uno <8 x half> %sp, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
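; Note: the final test splats the scalar through an i16 bitcast rather than a
; floating-point insertelement/shufflevector, checking that the splat is still
; recognised through the bitcasts and the single "vcmp.f16 eq, q0, r0" form is
; produced.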
define arm_aapcs_vfpcc <8 x half> @vcmp_oeq_v8f16_bc(<8 x half> %src, half %src2, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_oeq_v8f16_bc:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s6, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s8
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s12
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s0, s4
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s13
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s16, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s1
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s9
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s1, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s14
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s17, s13, s9
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s17, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s2
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s10
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s2, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: vmovx.f16 s5, s15
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s18, s14, s10
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vins.f16 s18, s6
; CHECK-MVE-NEXT: vmovx.f16 s6, s3
; CHECK-MVE-NEXT: vcmp.f16 s6, s4
; CHECK-MVE-NEXT: vmovx.f16 s6, s11
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s3, s4
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s19, s15, s11
; CHECK-MVE-NEXT: vins.f16 s19, s6
; CHECK-MVE-NEXT: vmov q0, q4
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_oeq_v8f16_bc:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vmov.f16 r0, s4
; CHECK-MVEFP-NEXT: vcmp.f16 eq, q0, r0
; CHECK-MVEFP-NEXT: vpsel q0, q2, q3
; CHECK-MVEFP-NEXT: bx lr
entry:
%src2bc = bitcast half %src2 to i16
%i = insertelement <8 x i16> undef, i16 %src2bc, i32 0
%spbc = shufflevector <8 x i16> %i, <8 x i16> undef, <8 x i32> zeroinitializer
%sp = bitcast <8 x i16> %spbc to <8 x half>
%c = fcmp oeq <8 x half> %src, %sp
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}