; llvm-project/llvm/test/CodeGen/Thumb2/mve-vcmpfz.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-MVE
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-MVEFP
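
; This file checks the lowering of a vector fcmp against zero feeding a
; select. With only scalar FP available (CHECK-MVE), each lane is compared
; individually with a scalar vcmp.f32 against #0, the flags are transferred
; with vmrs, and the result lanes are chosen with vseleq.f32. With +mve.fp
; (CHECK-MVEFP), the whole pattern folds to a single vector VCMP against zr
; followed by a VPSEL.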
define arm_aapcs_vfpcc <4 x float> @vcmp_oeq_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_oeq_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_oeq_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 eq, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp oeq <4 x float> %src, zeroinitializer
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
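
; FCMP ONE has no direct single-compare form in the checks below: the
; MVE.fp lowering materialises the predicate with a VPT/VCMPT pair and
; swaps the VPSEL operands, rather than using one VCMP condition against zr.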
define arm_aapcs_vfpcc <4 x float> @vcmp_one_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_one_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r2, #1
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r3, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r0, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_one_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vpt.f32 ge, q0, zr
; CHECK-MVEFP-NEXT: vcmpt.f32 le, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q2, q1
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp one <4 x float> %src, zeroinitializer
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_ogt_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ogt_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ogt_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 gt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ogt <4 x float> %src, zeroinitializer
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_oge_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_oge_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_oge_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 ge, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp oge <4 x float> %src, zeroinitializer
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_olt_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_olt_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_olt_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 lt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp olt <4 x float> %src, zeroinitializer
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_ole_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ole_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ole_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 le, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ole <4 x float> %src, zeroinitializer
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
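
; The unordered predicates follow. Like ONE above, UEQ is built from a
; VPT/VCMPT pair in the MVE.fp checks rather than a single compare.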
define arm_aapcs_vfpcc <4 x float> @vcmp_ueq_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ueq_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r2, #1
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r3, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r0, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ueq_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vpt.f32 ge, q0, zr
; CHECK-MVEFP-NEXT: vcmpt.f32 le, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ueq <4 x float> %src, zeroinitializer
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
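
; UNE maps directly onto the `ne` VCMP condition against zr.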
define arm_aapcs_vfpcc <4 x float> @vcmp_une_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_une_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_une_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 ne, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp une <4 x float> %src, zeroinitializer
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
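
; In the current output the remaining unordered predicates reuse the same
; single VCMP condition as their ordered counterparts (gt for ugt, ge for
; uge).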
define arm_aapcs_vfpcc <4 x float> @vcmp_ugt_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ugt_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ugt_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 gt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ugt <4 x float> %src, zeroinitializer
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_uge_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_uge_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_uge_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 ge, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp uge <4 x float> %src, zeroinitializer
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_ult_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ult_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ult_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 lt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ult <4 x float> %src, zeroinitializer
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
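; ule uses the same scalarised pattern with the le condition, which is
; likewise true for unordered inputs, so NaN lanes correctly select from %a.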
define arm_aapcs_vfpcc <4 x float> @vcmp_ule_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ule_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ule_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 le, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ule <4 x float> %src, zeroinitializer
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
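; ord has no zero-compare form: each lane is compared with itself
; (vcmp.f32 sN, sN), which raises the V flag only for NaN, so the vc
; condition marks the ordered lanes. Under MVE.fp the mask is built from a
; vpt ge / vcmpt lt pair feeding vpsel.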
define arm_aapcs_vfpcc <4 x float> @vcmp_ord_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_ord_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, s2
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s3
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ord_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vpt.f32 ge, q0, zr
; CHECK-MVEFP-NEXT: vcmpt.f32 lt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q2, q1
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ord <4 x float> %src, zeroinitializer
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
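; uno mirrors ord: the same self-compares with vs-conditional moves pick out
; the NaN lanes, and the MVE.fp sequence is the identical vpt pair with the
; vpsel operands swapped.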
define arm_aapcs_vfpcc <4 x float> @vcmp_uno_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_uno_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, s2
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s3
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_uno_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vpt.f32 ge, q0, zr
; CHECK-MVEFP-NEXT: vcmpt.f32 lt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp uno <4 x float> %src, zeroinitializer
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
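; The f16 variants double the lane count: the scalar fallback extracts the
; odd half of each 32-bit pair with vmovx.f16, compares all eight halves
; against #0 individually, and rebuilds the result lane by lane through
; vmov.16 into q3. MVE.fp still reduces the whole compare to a single
; vcmp.f16 plus vpsel.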
define arm_aapcs_vfpcc <8 x half> @vcmp_oeq_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_oeq_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_oeq_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 eq, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp oeq <8 x half> %src, zeroinitializer
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
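; one (ordered and not equal) sets a lane's bit when either the mi or the gt
; condition holds after the compare with #0, i.e. the value is strictly
; below or strictly above zero; MVE.fp composes it from a predicated
; compare pair.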
define arm_aapcs_vfpcc <8 x half> @vcmp_one_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_one_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r2, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r0, #1
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_one_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vpt.f16 ge, q0, zr
; CHECK-MVEFP-NEXT: vcmpt.f16 le, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q2, q1
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp one <8 x half> %src, zeroinitializer
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
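; ogt needs only the single gt condition per lane; the extract/rebuild
; scaffolding is the same as in the oeq case above.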
define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ogt_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ogt_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 gt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ogt <8 x half> %src, zeroinitializer
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
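; oge is the same pattern with the ge condition, which is taken only for
; ordered greater-or-equal results.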
define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_oge_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_oge_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 ge, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp oge <8 x half> %src, zeroinitializer
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_olt_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_olt_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 lt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp olt <8 x half> %src, zeroinitializer
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ole_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ole_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 le, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ole <8 x half> %src, zeroinitializer
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_ueq_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ueq_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r2, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r0, #1
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ueq_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vpt.f16 ge, q0, zr
; CHECK-MVEFP-NEXT: vcmpt.f16 le, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ueq <8 x half> %src, zeroinitializer
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_une_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_une_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_une_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 ne, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp une <8 x half> %src, zeroinitializer
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}

define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ugt_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ugt_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 gt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ugt <8 x half> %src, zeroinitializer
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_uge_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_uge_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 ge, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp uge <8 x half> %src, zeroinitializer
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ult_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ult_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 lt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ult <8 x half> %src, zeroinitializer
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ule_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ule_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 le, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ule <8 x half> %src, zeroinitializer
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
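; The ord/uno tests below reduce to a per-lane NaN check. The scalar
; CHECK-MVE path reads the V flag after each vcmp (vc for ordered lanes,
; vs for unordered ones); the CHECK-MVEFP path emits the same
; vpt.f16/vcmpt.f16 pair for both predicates and differs only in the
; vpsel operand order.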
define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_ord_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, s12
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, s0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, s1
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, s16
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, s2
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, s16
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, s3
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, s0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_ord_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vpt.f16 ge, q0, zr
; CHECK-MVEFP-NEXT: vcmpt.f16 lt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q2, q1
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ord <8 x half> %src, zeroinitializer
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_uno_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, s12
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, s0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, s1
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, s16
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, s2
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, s16
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, s3
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, s0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_uno_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vpt.f16 ge, q0, zr
; CHECK-MVEFP-NEXT: vcmpt.f16 lt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp uno <8 x half> %src, zeroinitializer
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
; Reversed: the same comparisons with zeroinitializer as the LHS of the
; fcmp. The condition has to be commuted to keep the compare-with-zero
; form; eq and ne are symmetric and are expected to be unchanged.
define arm_aapcs_vfpcc <4 x float> @vcmp_r_oeq_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_oeq_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_oeq_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 eq, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp oeq <4 x float> zeroinitializer, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
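; one(0.0, x) holds when x is strictly less than or greater than zero: the
; scalar path sets the lane bit under either mi or gt, while the MVEFP path
; folds the two compares into one VPT block and swaps the vpsel operands.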
define arm_aapcs_vfpcc <4 x float> @vcmp_r_one_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_one_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r2, #1
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r3, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r0, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_one_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vpt.f32 le, q0, zr
; CHECK-MVEFP-NEXT: vcmpt.f32 ge, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q2, q1
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp one <4 x float> zeroinitializer, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
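; ogt with zero on the LHS is just x < 0, so the scalar path tests mi
; (N set) and the MVEFP path emits a plain vcmp.f32 lt against zr.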
define arm_aapcs_vfpcc <4 x float> @vcmp_r_ogt_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ogt_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ogt_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 lt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ogt <4 x float> zeroinitializer, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
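; oge(0.0, x) is x <= 0: the scalar condition is ls (C clear or Z set) and
; the MVEFP compare becomes le.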
define arm_aapcs_vfpcc <4 x float> @vcmp_r_oge_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_oge_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_oge_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 le, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp oge <4 x float> zeroinitializer, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
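; olt(0.0, x) is x > 0, so both paths test gt.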
define arm_aapcs_vfpcc <4 x float> @vcmp_r_olt_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_olt_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_olt_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 gt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp olt <4 x float> zeroinitializer, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
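; ole(0.0, x) is x >= 0, so both paths test ge.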
define arm_aapcs_vfpcc <4 x float> @vcmp_r_ole_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ole_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ole_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 ge, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ole <4 x float> zeroinitializer, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
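; ueq(0.0, x) is "x == 0 or unordered": the scalar path ORs the eq and vs
; conditions into the same lane bit; the MVEFP path again needs a VPT pair.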
define arm_aapcs_vfpcc <4 x float> @vcmp_r_ueq_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ueq_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r2, #1
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r3, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r0, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ueq_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vpt.f32 le, q0, zr
; CHECK-MVEFP-NEXT: vcmpt.f32 ge, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ueq <4 x float> zeroinitializer, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
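; une is symmetric, and ne is already true for unordered operands (Z clear
; after the scalar vcmp), so a single compare suffices on both paths.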
define arm_aapcs_vfpcc <4 x float> @vcmp_r_une_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_une_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_une_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 ne, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp une <4 x float> zeroinitializer, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
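; ugt(0.0, x) is "x < 0 or unordered", which maps onto the scalar lt
; condition (N != V).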
define arm_aapcs_vfpcc <4 x float> @vcmp_r_ugt_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ugt_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ugt_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 lt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ugt <4 x float> zeroinitializer, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
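; uge(0.0, x) is "x <= 0 or unordered", the scalar le condition.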
define arm_aapcs_vfpcc <4 x float> @vcmp_r_uge_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_uge_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_uge_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 le, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp uge <4 x float> zeroinitializer, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_r_ult_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ult_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ult_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 gt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ult <4 x float> zeroinitializer, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_r_ule_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ule_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, #0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, #0
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, #0
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ule_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f32 ge, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ule <4 x float> zeroinitializer, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_r_ord_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_ord_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, s2
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s3
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ord_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vpt.f32 le, q0, zr
; CHECK-MVEFP-NEXT: vcmpt.f32 gt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q2, q1
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ord <4 x float> zeroinitializer, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <4 x float> @vcmp_r_uno_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) {
; CHECK-MVE-LABEL: vcmp_r_uno_v4f32:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vcmp.f32 s0, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f32 s1, s1
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vcmp.f32 s2, s2
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r3, #0
; CHECK-MVE-NEXT: vcmp.f32 s3, s3
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r3, #1
; CHECK-MVE-NEXT: cmp r3, #0
; CHECK-MVE-NEXT: cset r3, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7
; CHECK-MVE-NEXT: lsls r0, r3, #31
; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6
; CHECK-MVE-NEXT: lsls r0, r2, #31
; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5
; CHECK-MVE-NEXT: lsls r0, r1, #31
; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_uno_v4f32:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vpt.f32 le, q0, zr
; CHECK-MVEFP-NEXT: vcmpt.f32 gt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp uno <4 x float> zeroinitializer, %src
%s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b
ret <4 x float> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_oeq_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_oeq_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_oeq_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 eq, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp oeq <8 x half> zeroinitializer, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_one_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_one_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r2, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r0, #1
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_one_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vpt.f16 le, q0, zr
; CHECK-MVEFP-NEXT: vcmpt.f16 ge, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q2, q1
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp one <8 x half> zeroinitializer, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_ogt_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ogt_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it mi
; CHECK-MVE-NEXT: movmi r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ogt_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 lt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ogt <8 x half> zeroinitializer, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_oge_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_oge_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ls
; CHECK-MVE-NEXT: movls r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_oge_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 le, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp oge <8 x half> zeroinitializer, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_olt_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_olt_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it gt
; CHECK-MVE-NEXT: movgt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_olt_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 gt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp olt <8 x half> zeroinitializer, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_ole_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ole_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ge
; CHECK-MVE-NEXT: movge r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ole_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 ge, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ole <8 x half> zeroinitializer, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_ueq_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ueq_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r2, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: mov.w r0, #0
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r1, #1
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it eq
; CHECK-MVE-NEXT: moveq r0, #1
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ueq_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vpt.f16 le, q0, zr
; CHECK-MVEFP-NEXT: vcmpt.f16 ge, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ueq <8 x half> zeroinitializer, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_une_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_une_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it ne
; CHECK-MVE-NEXT: movne r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_une_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 ne, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp une <8 x half> zeroinitializer, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_ugt_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ugt_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it lt
; CHECK-MVE-NEXT: movlt r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ugt_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 lt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ugt <8 x half> zeroinitializer, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_uge_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_uge_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it le
; CHECK-MVE-NEXT: movle r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_uge_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 le, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp uge <8 x half> zeroinitializer, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_ult_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ult_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it hi
; CHECK-MVE-NEXT: movhi r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ult_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 gt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ult <8 x half> zeroinitializer, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_ule_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ule_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, #0
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, #0
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, #0
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, #0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it pl
; CHECK-MVE-NEXT: movpl r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ule_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vcmp.f16 ge, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ule <8 x half> zeroinitializer, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
define arm_aapcs_vfpcc <8 x half> @vcmp_r_ord_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_ord_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, s12
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, s0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, s1
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, s16
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, s2
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, s16
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, s3
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, s0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vc
; CHECK-MVE-NEXT: movvc r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_ord_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vpt.f16 le, q0, zr
; CHECK-MVEFP-NEXT: vcmpt.f16 gt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q2, q1
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp ord <8 x half> zeroinitializer, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}
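; The unordered counterpart of vcmp_r_ord_v8f16 above: fcmp uno is true iff
; either operand is NaN. The per-lane scalar sequence is the same self-compare,
; but it selects on "vs" (V set, i.e. unordered) instead of "vc", and the
; CHECK-MVEFP vpsel operands swap accordingly.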
define arm_aapcs_vfpcc <8 x half> @vcmp_r_uno_v8f16(<8 x half> %src, <8 x half> %a, <8 x half> %b) {
; CHECK-MVE-LABEL: vcmp_r_uno_v8f16:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
; CHECK-MVE-NEXT: vmovx.f16 s12, s0
; CHECK-MVE-NEXT: movs r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s12, s12
; CHECK-MVE-NEXT: vmovx.f16 s12, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: vcmp.f16 s0, s0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s14, s8
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: mov.w r2, #0
; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r2, #1
; CHECK-MVE-NEXT: cmp r2, #0
; CHECK-MVE-NEXT: cset r2, ne
; CHECK-MVE-NEXT: vmov r1, s12
; CHECK-MVE-NEXT: lsls r2, r2, #31
; CHECK-MVE-NEXT: vcmp.f16 s1, s1
; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r2, s12
; CHECK-MVE-NEXT: vmovx.f16 s18, s9
; CHECK-MVE-NEXT: vmov.16 q3[0], r2
; CHECK-MVE-NEXT: vmovx.f16 s0, s3
; CHECK-MVE-NEXT: vmov.16 q3[1], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s1
; CHECK-MVE-NEXT: vcmp.f16 s16, s16
; CHECK-MVE-NEXT: vmov.16 q3[2], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s5
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s2, s2
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s18, s10
; CHECK-MVE-NEXT: vmov.16 q3[3], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vmovx.f16 s16, s2
; CHECK-MVE-NEXT: vcmp.f16 s16, s16
; CHECK-MVE-NEXT: vmov.16 q3[4], r1
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s16, s6
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vcmp.f16 s3, s3
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: vcmp.f16 s0, s0
; CHECK-MVE-NEXT: vmov.16 q3[5], r1
; CHECK-MVE-NEXT: mov.w r1, #0
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r1, #1
; CHECK-MVE-NEXT: cmp r1, #0
; CHECK-MVE-NEXT: cset r1, ne
; CHECK-MVE-NEXT: vmovx.f16 s0, s7
; CHECK-MVE-NEXT: lsls r1, r1, #31
; CHECK-MVE-NEXT: vmovx.f16 s2, s11
; CHECK-MVE-NEXT: vseleq.f16 s16, s11, s7
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-MVE-NEXT: it vs
; CHECK-MVE-NEXT: movvs r0, #1
; CHECK-MVE-NEXT: cmp r0, #0
; CHECK-MVE-NEXT: cset r0, ne
; CHECK-MVE-NEXT: vmov r1, s16
; CHECK-MVE-NEXT: lsls r0, r0, #31
; CHECK-MVE-NEXT: vmov.16 q3[6], r1
; CHECK-MVE-NEXT: vseleq.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q3[7], r0
; CHECK-MVE-NEXT: vmov q0, q3
; CHECK-MVE-NEXT: vpop {d8, d9}
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: vcmp_r_uno_v8f16:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vpt.f16 le, q0, zr
; CHECK-MVEFP-NEXT: vcmpt.f16 gt, q0, zr
; CHECK-MVEFP-NEXT: vpsel q0, q1, q2
; CHECK-MVEFP-NEXT: bx lr
entry:
%c = fcmp uno <8 x half> zeroinitializer, %src
%s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b
ret <8 x half> %s
}