# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -run-pass arm-mve-vpt-opts %s -o - | FileCheck %s
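
# The tests below exercise two behaviours of the arm-mve-vpt-opts pass that are
# spelled out in the per-function comments: rewriting a VCMP whose condition is
# the opposite of an earlier, otherwise identical VCMP into a VPNOT of that
# VCMP's result, and reusing/re-creating VCCR values with VPNOTs so that
# predicate values do not have to be spilled.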
--- |
  target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
  target triple = "thumbv8.1m.main-none-none-eabi"

  ; Functions are intentionally left blank - see the MIR sequences below.
  define arm_aapcs_vfpcc <4 x float> @vcmp_with_opposite_cond(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @vcmp_with_opposite_cond_and_swapped_operands(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @triple_vcmp(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @killed_vccr_values(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @predicated_vcmps(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @flt_with_swapped_operands(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @different_opcodes(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @incorrect_condcode(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @vpr_or_vccr_write_between_vcmps(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }
  define arm_aapcs_vfpcc <4 x float> @spill_prevention(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @spill_prevention_multi(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @spill_prevention_predicated_vpnots(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @spill_prevention_copies(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @spill_prevention_vpnot_reordering(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  define arm_aapcs_vfpcc <4 x float> @spill_prevention_stop_after_write(<4 x float> %inactive1) #0 {
  entry:
    ret <4 x float> %inactive1
  }

  attributes #0 = { "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" }
...
---
name: vcmp_with_opposite_cond
alignment: 4
body: |
  ; CHECK-LABEL: name: vcmp_with_opposite_cond
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPf16_:%[0-9]+]]:vccr = MVE_VCMPf16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPf16_]], 0, $noreg
  ; CHECK: bb.1:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: [[MVE_VCMPf32_:%[0-9]+]]:vccr = MVE_VCMPf32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT1:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPf32_]], 0, $noreg
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x80000000)
  ; CHECK: [[MVE_VCMPi16_:%[0-9]+]]:vccr = MVE_VCMPi16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT2:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi16_]], 0, $noreg
  ; CHECK: bb.3:
  ; CHECK: successors: %bb.4(0x80000000)
  ; CHECK: [[MVE_VCMPi32_:%[0-9]+]]:vccr = MVE_VCMPi32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT3:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi32_]], 0, $noreg
  ; CHECK: bb.4:
  ; CHECK: successors: %bb.5(0x80000000)
  ; CHECK: [[MVE_VCMPi8_:%[0-9]+]]:vccr = MVE_VCMPi8 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT4:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi8_]], 0, $noreg
  ; CHECK: bb.5:
  ; CHECK: successors: %bb.6(0x80000000)
  ; CHECK: [[MVE_VCMPs16_:%[0-9]+]]:vccr = MVE_VCMPs16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT5:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs16_]], 0, $noreg
  ; CHECK: bb.6:
  ; CHECK: successors: %bb.7(0x80000000)
  ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT6:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
  ; CHECK: bb.7:
  ; CHECK: successors: %bb.8(0x80000000)
  ; CHECK: [[MVE_VCMPs8_:%[0-9]+]]:vccr = MVE_VCMPs8 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT7:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs8_]], 0, $noreg
  ; CHECK: bb.8:
  ; CHECK: successors: %bb.9(0x80000000)
  ; CHECK: [[MVE_VCMPu16_:%[0-9]+]]:vccr = MVE_VCMPu16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT8:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu16_]], 0, $noreg
  ; CHECK: bb.9:
  ; CHECK: successors: %bb.10(0x80000000)
  ; CHECK: [[MVE_VCMPu32_:%[0-9]+]]:vccr = MVE_VCMPu32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT9:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu32_]], 0, $noreg
  ; CHECK: bb.10:
  ; CHECK: successors: %bb.11(0x80000000)
  ; CHECK: [[MVE_VCMPu8_:%[0-9]+]]:vccr = MVE_VCMPu8 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT10:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu8_]], 0, $noreg
  ; CHECK: bb.11:
  ; CHECK: successors: %bb.12(0x80000000)
  ; CHECK: [[MVE_VCMPf16r:%[0-9]+]]:vccr = MVE_VCMPf16r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT11:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPf16r]], 0, $noreg
  ; CHECK: bb.12:
  ; CHECK: successors: %bb.13(0x80000000)
  ; CHECK: [[MVE_VCMPf32r:%[0-9]+]]:vccr = MVE_VCMPf32r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT12:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPf32r]], 0, $noreg
  ; CHECK: bb.13:
  ; CHECK: successors: %bb.14(0x80000000)
  ; CHECK: [[MVE_VCMPi16r:%[0-9]+]]:vccr = MVE_VCMPi16r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT13:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi16r]], 0, $noreg
  ; CHECK: bb.14:
  ; CHECK: successors: %bb.15(0x80000000)
  ; CHECK: [[MVE_VCMPi32r:%[0-9]+]]:vccr = MVE_VCMPi32r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT14:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi32r]], 0, $noreg
  ; CHECK: bb.15:
  ; CHECK: successors: %bb.16(0x80000000)
  ; CHECK: [[MVE_VCMPi8r:%[0-9]+]]:vccr = MVE_VCMPi8r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT15:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi8r]], 0, $noreg
  ; CHECK: bb.16:
  ; CHECK: successors: %bb.17(0x80000000)
  ; CHECK: [[MVE_VCMPs16r:%[0-9]+]]:vccr = MVE_VCMPs16r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT16:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs16r]], 0, $noreg
  ; CHECK: bb.17:
  ; CHECK: successors: %bb.18(0x80000000)
  ; CHECK: [[MVE_VCMPs32r:%[0-9]+]]:vccr = MVE_VCMPs32r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT17:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32r]], 0, $noreg
  ; CHECK: bb.18:
  ; CHECK: successors: %bb.19(0x80000000)
  ; CHECK: [[MVE_VCMPs8r:%[0-9]+]]:vccr = MVE_VCMPs8r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT18:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs8r]], 0, $noreg
  ; CHECK: bb.19:
  ; CHECK: successors: %bb.20(0x80000000)
  ; CHECK: [[MVE_VCMPu16r:%[0-9]+]]:vccr = MVE_VCMPu16r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT19:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu16r]], 0, $noreg
  ; CHECK: bb.20:
  ; CHECK: successors: %bb.21(0x80000000)
  ; CHECK: [[MVE_VCMPu32r:%[0-9]+]]:vccr = MVE_VCMPu32r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT20:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu32r]], 0, $noreg
  ; CHECK: bb.21:
  ; CHECK: successors: %bb.22(0x80000000)
  ; CHECK: [[MVE_VCMPu8r:%[0-9]+]]:vccr = MVE_VCMPu8r %1:mqpr, %25:gprwithzr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT21:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu8r]], 0, $noreg
  ; CHECK: bb.22:
  ; CHECK: [[MVE_VCMPu8r1:%[0-9]+]]:vccr = MVE_VCMPu8r %1:mqpr, $zr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT22:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu8r1]], 0, $noreg
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  ;
  ; Tests that VCMPs with an opposite condition are correctly converted into VPNOTs.
  ;
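  ; (Each pair below differs only in the condition-code immediate, 11 instead of
  ; 10, which the pass treats as the opposite condition; the '*r' variants are
  ; the forms that compare against a scalar (gprwithzr) operand or $zr rather
  ; than a second Q register.)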
  bb.0:
    %3:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %4:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.1:
    %5:vccr = MVE_VCMPf32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %6:vccr = MVE_VCMPf32 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.2:
    %7:vccr = MVE_VCMPi16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %8:vccr = MVE_VCMPi16 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.3:
    %9:vccr = MVE_VCMPi32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %10:vccr = MVE_VCMPi32 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.4:
    %11:vccr = MVE_VCMPi8 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %12:vccr = MVE_VCMPi8 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.5:
    %13:vccr = MVE_VCMPs16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %14:vccr = MVE_VCMPs16 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.6:
    %15:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %16:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.7:
    %17:vccr = MVE_VCMPs8 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %18:vccr = MVE_VCMPs8 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.8:
    %19:vccr = MVE_VCMPu16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %20:vccr = MVE_VCMPu16 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.9:
    %21:vccr = MVE_VCMPu32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %22:vccr = MVE_VCMPu32 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.10:
    %23:vccr = MVE_VCMPu8 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %24:vccr = MVE_VCMPu8 %0:mqpr, %1:mqpr, 11, 0, $noreg

  bb.11:
    %25:vccr = MVE_VCMPf16r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %26:vccr = MVE_VCMPf16r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.12:
    %27:vccr = MVE_VCMPf32r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %28:vccr = MVE_VCMPf32r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.13:
    %29:vccr = MVE_VCMPi16r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %30:vccr = MVE_VCMPi16r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.14:
    %31:vccr = MVE_VCMPi32r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %32:vccr = MVE_VCMPi32r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.15:
    %33:vccr = MVE_VCMPi8r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %34:vccr = MVE_VCMPi8r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.16:
    %35:vccr = MVE_VCMPs16r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %36:vccr = MVE_VCMPs16r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.17:
    %37:vccr = MVE_VCMPs32r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %38:vccr = MVE_VCMPs32r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.18:
    %39:vccr = MVE_VCMPs8r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %40:vccr = MVE_VCMPs8r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.19:
    %41:vccr = MVE_VCMPu16r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %42:vccr = MVE_VCMPu16r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.20:
    %43:vccr = MVE_VCMPu32r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %44:vccr = MVE_VCMPu32r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.21:
    %45:vccr = MVE_VCMPu8r %0:mqpr, %2:gprwithzr, 10, 0, $noreg
    %46:vccr = MVE_VCMPu8r %0:mqpr, %2:gprwithzr, 11, 0, $noreg

  bb.22:
    ; There shouldn't be any exception for $zr, so the second VCMP should
    ; be transformed into a VPNOT.
    %47:vccr = MVE_VCMPu8r %0:mqpr, $zr, 10, 0, $noreg
    %48:vccr = MVE_VCMPu8r %0:mqpr, $zr, 11, 0, $noreg

    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: vcmp_with_opposite_cond_and_swapped_operands
alignment: 4
body: |
  ; CHECK-LABEL: name: vcmp_with_opposite_cond_and_swapped_operands
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPi16_:%[0-9]+]]:vccr = MVE_VCMPi16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi16_]], 0, $noreg
  ; CHECK: bb.1:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: [[MVE_VCMPi32_:%[0-9]+]]:vccr = MVE_VCMPi32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT1:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi32_]], 0, $noreg
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x80000000)
  ; CHECK: [[MVE_VCMPi8_:%[0-9]+]]:vccr = MVE_VCMPi8 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT2:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPi8_]], 0, $noreg
  ; CHECK: bb.3:
  ; CHECK: successors: %bb.4(0x80000000)
  ; CHECK: [[MVE_VCMPs16_:%[0-9]+]]:vccr = MVE_VCMPs16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT3:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs16_]], 0, $noreg
  ; CHECK: bb.4:
  ; CHECK: successors: %bb.5(0x80000000)
  ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT4:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
  ; CHECK: bb.5:
  ; CHECK: successors: %bb.6(0x80000000)
  ; CHECK: [[MVE_VCMPs8_:%[0-9]+]]:vccr = MVE_VCMPs8 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT5:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs8_]], 0, $noreg
  ; CHECK: bb.6:
  ; CHECK: successors: %bb.7(0x80000000)
  ; CHECK: [[MVE_VCMPu16_:%[0-9]+]]:vccr = MVE_VCMPu16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT6:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu16_]], 0, $noreg
  ; CHECK: bb.7:
  ; CHECK: successors: %bb.8(0x80000000)
  ; CHECK: [[MVE_VCMPu32_:%[0-9]+]]:vccr = MVE_VCMPu32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT7:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu32_]], 0, $noreg
  ; CHECK: bb.8:
  ; CHECK: [[MVE_VCMPu8_:%[0-9]+]]:vccr = MVE_VCMPu8 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT8:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPu8_]], 0, $noreg
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  ;
  ; Tests that VCMPs with an opposite condition and swapped operands are
  ; correctly converted into VPNOTs.
  ;
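  ; (For these integer compares, swapping the two Q-register operands and using
  ; condition code 12 instead of 10 is treated as the exact inverse of the first
  ; compare, so the second VCMP can become a VPNOT of the first result.)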
  bb.0:
    %2:vccr = MVE_VCMPi16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VCMPi16 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.1:
    %4:vccr = MVE_VCMPi32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %5:vccr = MVE_VCMPi32 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.2:
    %6:vccr = MVE_VCMPi8 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %7:vccr = MVE_VCMPi8 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.3:
    %8:vccr = MVE_VCMPs16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %9:vccr = MVE_VCMPs16 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.4:
    %10:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %11:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.5:
    %12:vccr = MVE_VCMPs8 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %13:vccr = MVE_VCMPs8 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.6:
    %14:vccr = MVE_VCMPu16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %15:vccr = MVE_VCMPu16 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.7:
    %16:vccr = MVE_VCMPu32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %17:vccr = MVE_VCMPu32 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.8:
    %18:vccr = MVE_VCMPu8 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %19:vccr = MVE_VCMPu8 %1:mqpr, %0:mqpr, 12, 0, $noreg

    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: triple_vcmp
alignment: 4
body: |
  ;
  ; Tests that, when there are 2 "VPNOT-like VCMPs" in a row, only the first
  ; becomes a VPNOT.
  ;
  bb.0:
    ; CHECK-LABEL: name: triple_vcmp
    ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
    ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
    ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %2:mqpr, %1:mqpr, 12, 0, $noreg
    ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 12, 0, $noreg
    %4:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 12, 0, $noreg
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: killed_vccr_values
alignment: 4
body: |
  ; CHECK-LABEL: name: killed_vccr_values
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPf16_:%[0-9]+]]:vccr = MVE_VCMPf16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VORR:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %2:mqpr, 1, [[MVE_VCMPf16_]], undef [[MVE_VORR]]
  ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPf16_]], 0, $noreg
  ; CHECK: bb.1:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT1:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
  ; CHECK: [[MVE_VORR1:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT1]], undef [[MVE_VORR1]]
  ; CHECK: [[MVE_VPNOT2:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT1]], 0, $noreg
  ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR1]], [[MVE_VORR1]], 1, [[MVE_VPNOT2]], undef [[MVE_VORR2]]
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x80000000)
  ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT3:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_1]], 0, $noreg
  ; CHECK: [[MVE_VORR3:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT3]], undef [[MVE_VORR3]]
  ; CHECK: [[MVE_VPNOT4:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT3]], 0, $noreg
  ; CHECK: [[MVE_VORR4:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR3]], [[MVE_VORR3]], 1, [[MVE_VPNOT4]], undef [[MVE_VORR4]]
  ; CHECK: bb.3:
  ; CHECK: [[MVE_VCMPs32_2:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT5:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_2]], 0, $noreg
  ; CHECK: [[MVE_VORR5:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT5]], undef [[MVE_VORR5]]
  ; CHECK: [[MVE_VPNOT6:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT5]], 0, $noreg
  ; CHECK: [[MVE_VORR6:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR5]], [[MVE_VORR5]], 1, [[MVE_VPNOT6]], undef [[MVE_VORR6]]
  ; CHECK: [[MVE_VORR7:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR6]], [[MVE_VORR6]], 1, [[MVE_VPNOT6]], undef [[MVE_VORR7]]
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  bb.0:
    ;
    ; Tests that, if the result of the VCMP is killed before the
    ; second VCMP (that will be converted into a VPNOT) is found,
    ; the kill flag is removed.
    ;
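    ; (The kill flag marks the last use of a value; because the inserted VPNOT
    ; reads the VCMP result after that use, keeping the flag would leave stale
    ; liveness information.)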
    %2:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:mqpr = MVE_VORR %0:mqpr, %1:mqpr, 1, killed %2:vccr, undef %3:mqpr
    %4:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 11, 0, $noreg
  bb.1:
    ;
    ; Tests that, if the result of the VCMP that has been replaced with a
    ; VPNOT is killed (before the insertion of the second VPNOT),
    ; the kill flag is removed.
    ;
    %5:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %6:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 12, 0, $noreg
    %7:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, killed %6:vccr, undef %7:mqpr
    %8:mqpr = MVE_VORR %7:mqpr, %7:mqpr, 1, %5:vccr, undef %8:mqpr
  bb.2:
    ;
    ; Tests that the kill flag is removed when a VPNOT is inserted to provide
    ; the VCCR value for an instruction.
    ;
    %9:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %10:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 12, 0, $noreg
    %11:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %10:vccr, undef %11:mqpr
    %12:mqpr = MVE_VORR %11:mqpr, %11:mqpr, 1, killed %9:vccr, undef %12:mqpr
  bb.3:
    ;
    ; Tests that the kill flag is correctly removed when replacing a use
    ; of the opposite VCCR value with the last VPNOT's result.
    ;
    %13:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %14:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 12, 0, $noreg
    %15:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %14:vccr, undef %15:mqpr
    %16:mqpr = MVE_VORR %15:mqpr, %15:mqpr, 1, %13:vccr, undef %16:mqpr
    %17:mqpr = MVE_VORR %16:mqpr, %16:mqpr, 1, killed %13:vccr, undef %17:mqpr
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: predicated_vcmps
alignment: 4
body: |
  ; CHECK-LABEL: name: predicated_vcmps
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPi16_:%[0-9]+]]:vccr = MVE_VCMPi16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPi16_1:%[0-9]+]]:vccr = MVE_VCMPi16 %2:mqpr, %1:mqpr, 12, 1, [[MVE_VCMPi16_]]
  ; CHECK: [[MVE_VCMPi16_2:%[0-9]+]]:vccr = MVE_VCMPi16 %1:mqpr, %2:mqpr, 10, 1, [[MVE_VCMPi16_]]
  ; CHECK: bb.1:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: [[MVE_VCMPi32_:%[0-9]+]]:vccr = MVE_VCMPi32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPi32_1:%[0-9]+]]:vccr = MVE_VCMPi32 %2:mqpr, %1:mqpr, 12, 1, [[MVE_VCMPi32_]]
  ; CHECK: [[MVE_VCMPi32_2:%[0-9]+]]:vccr = MVE_VCMPi32 %1:mqpr, %2:mqpr, 10, 1, [[MVE_VCMPi32_]]
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x80000000)
  ; CHECK: [[MVE_VCMPf16_:%[0-9]+]]:vccr = MVE_VCMPf16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPf16_1:%[0-9]+]]:vccr = MVE_VCMPf16 %1:mqpr, %2:mqpr, 11, 1, [[MVE_VCMPf16_]]
  ; CHECK: [[MVE_VCMPf16_2:%[0-9]+]]:vccr = MVE_VCMPf16 %1:mqpr, %2:mqpr, 10, 1, [[MVE_VCMPf16_]]
  ; CHECK: bb.3:
  ; CHECK: successors: %bb.4(0x80000000)
  ; CHECK: [[MVE_VCMPf32_:%[0-9]+]]:vccr = MVE_VCMPf32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPf32_1:%[0-9]+]]:vccr = MVE_VCMPf32 %1:mqpr, %2:mqpr, 11, 1, [[MVE_VCMPf32_]]
  ; CHECK: [[MVE_VCMPf32_2:%[0-9]+]]:vccr = MVE_VCMPf32 %1:mqpr, %2:mqpr, 10, 1, [[MVE_VCMPf32_]]
  ; CHECK: bb.4:
  ; CHECK: successors: %bb.5(0x80000000)
  ; CHECK: [[MVE_VCMPi16_3:%[0-9]+]]:vccr = MVE_VCMPi16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPi16_4:%[0-9]+]]:vccr = MVE_VCMPi16 %1:mqpr, %2:mqpr, 11, 1, [[MVE_VCMPi16_3]]
  ; CHECK: [[MVE_VCMPi16_5:%[0-9]+]]:vccr = MVE_VCMPi16 %1:mqpr, %2:mqpr, 10, 1, [[MVE_VCMPi16_3]]
  ; CHECK: bb.5:
  ; CHECK: [[MVE_VCMPi32_3:%[0-9]+]]:vccr = MVE_VCMPi32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPi32_4:%[0-9]+]]:vccr = MVE_VCMPi32 %1:mqpr, %2:mqpr, 11, 1, [[MVE_VCMPi32_3]]
  ; CHECK: [[MVE_VCMPi32_5:%[0-9]+]]:vccr = MVE_VCMPi32 %1:mqpr, %2:mqpr, 10, 1, [[MVE_VCMPi32_3]]
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  ;
  ; Tests that predicated VCMPs are not replaced.
  ;
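  ; (Here "predicated" means the vpred operand is 1 and the instruction takes a
  ; VCCR mask input instead of $noreg, as in the compares below.)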
  bb.0:
    %2:vccr = MVE_VCMPi16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VCMPi16 %1:mqpr, %0:mqpr, 12, 1, %2:vccr
    %4:vccr = MVE_VCMPi16 %0:mqpr, %1:mqpr, 10, 1, %2:vccr

  bb.1:
    %5:vccr = MVE_VCMPi32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %6:vccr = MVE_VCMPi32 %1:mqpr, %0:mqpr, 12, 1, %5:vccr
    %7:vccr = MVE_VCMPi32 %0:mqpr, %1:mqpr, 10, 1, %5:vccr

  bb.2:
    %8:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %9:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 11, 1, %8:vccr
    %10:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 10, 1, %8:vccr

  bb.3:
    %11:vccr = MVE_VCMPf32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %12:vccr = MVE_VCMPf32 %0:mqpr, %1:mqpr, 11, 1, %11:vccr
    %13:vccr = MVE_VCMPf32 %0:mqpr, %1:mqpr, 10, 1, %11:vccr

  bb.4:
    %14:vccr = MVE_VCMPi16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %15:vccr = MVE_VCMPi16 %0:mqpr, %1:mqpr, 11, 1, %14:vccr
    %16:vccr = MVE_VCMPi16 %0:mqpr, %1:mqpr, 10, 1, %14:vccr

  bb.5:
    %17:vccr = MVE_VCMPi32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %18:vccr = MVE_VCMPi32 %0:mqpr, %1:mqpr, 11, 1, %17:vccr
    %19:vccr = MVE_VCMPi32 %0:mqpr, %1:mqpr, 10, 1, %17:vccr

    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: flt_with_swapped_operands
alignment: 4
body: |
  ; CHECK-LABEL: name: flt_with_swapped_operands
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPf16_:%[0-9]+]]:vccr = MVE_VCMPf16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPf16_1:%[0-9]+]]:vccr = MVE_VCMPf16 %2:mqpr, %1:mqpr, 12, 0, $noreg
  ; CHECK: bb.1:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: [[MVE_VCMPf32_:%[0-9]+]]:vccr = MVE_VCMPf32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPf32_1:%[0-9]+]]:vccr = MVE_VCMPf32 %2:mqpr, %1:mqpr, 12, 0, $noreg
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x80000000)
  ; CHECK: [[MVE_VCMPf16_2:%[0-9]+]]:vccr = MVE_VCMPf16 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPf16_3:%[0-9]+]]:vccr = MVE_VCMPf16 %2:mqpr, %1:mqpr, 11, 0, $noreg
  ; CHECK: bb.3:
  ; CHECK: [[MVE_VCMPf32_2:%[0-9]+]]:vccr = MVE_VCMPf32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPf32_3:%[0-9]+]]:vccr = MVE_VCMPf32 %2:mqpr, %1:mqpr, 11, 0, $noreg
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  ;
  ; Tests that float VCMPs with an opposite condition and swapped operands
  ; are not transformed into VPNOTs.
  ;
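  ; (Reversing the operands of a floating-point compare is not a plain condition
  ; inversion once NaN inputs are possible, so these pairs are left as two VCMPs.)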
  bb.0:
    %2:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VCMPf16 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.1:
    %4:vccr = MVE_VCMPf32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %5:vccr = MVE_VCMPf32 %1:mqpr, %0:mqpr, 12, 0, $noreg

  bb.2:
    %6:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %7:vccr = MVE_VCMPf16 %1:mqpr, %0:mqpr, 11, 0, $noreg

  bb.3:
    %8:vccr = MVE_VCMPf32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %9:vccr = MVE_VCMPf32 %1:mqpr, %0:mqpr, 11, 0, $noreg
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: different_opcodes
alignment: 4
body: |
  ;
  ; Tests that a "VPNOT-like VCMP" with an opcode different from the previous VCMP
  ; is not transformed into a VPNOT.
  ;
  bb.0:
    ; CHECK-LABEL: name: different_opcodes
    ; CHECK: [[MVE_VCMPf16_:%[0-9]+]]:vccr = MVE_VCMPf16 %1:mqpr, %2:mqpr, 0, 0, $noreg
    ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 1, 1, $noreg
    ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
    %2:vccr = MVE_VCMPf16 %0:mqpr, %1:mqpr, 0, 0, $noreg
    %3:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 1, 1, $noreg
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: incorrect_condcode
alignment: 4
body: |
  ; CHECK-LABEL: name: incorrect_condcode
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %2:mqpr, %1:mqpr, 11, 0, $noreg
  ; CHECK: bb.1:
  ; CHECK: [[MVE_VCMPs32_2:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VCMPs32_3:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 12, 0, $noreg
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  ;
  ; Tests that a VCMP is not transformed into a VPNOT if its CondCode is not
  ; the opposite CondCode.
  ;
  bb.0:
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 11, 0, $noreg
  bb.1:
    %4:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %5:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 12, 0, $noreg
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: vpr_or_vccr_write_between_vcmps
alignment: 4
body: |
  ;
  ; Tests that a "VPNOT-like VCMP" will not be transformed into a VPNOT if
  ; VCCR/VPR is written to in-between.
  ;
  bb.0:
    ; CHECK-LABEL: name: vpr_or_vccr_write_between_vcmps
    ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 12, 0, $noreg
    ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT killed [[MVE_VCMPs32_]], 0, $noreg
    ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %2:mqpr, %1:mqpr, 10, 0, $noreg
    ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 12, 0, $noreg
    %3:vccr = MVE_VPNOT killed %2:vccr, 0, $noreg
    %4:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 10, 0, $noreg
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: spill_prevention
alignment: 4
body: |
  ; CHECK-LABEL: name: spill_prevention
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
  ; CHECK: [[MVE_VORR:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT]], undef [[MVE_VORR]]
  ; CHECK: [[MVE_VPNOT1:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT]], 0, $noreg
  ; CHECK: [[MVE_VORR1:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR]], [[MVE_VORR]], 1, [[MVE_VPNOT1]], undef [[MVE_VORR1]]
  ; CHECK: [[MVE_VPNOT2:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT1]], 0, $noreg
  ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR1]], [[MVE_VORR1]], 1, [[MVE_VPNOT2]], undef [[MVE_VORR2]]
  ; CHECK: [[MVE_VPNOT3:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT2]], 0, $noreg
  ; CHECK: [[MVE_VORR3:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR2]], [[MVE_VORR2]], 1, [[MVE_VPNOT3]], undef [[MVE_VORR3]]
  ; CHECK: [[MVE_VPNOT4:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT3]], 0, $noreg
  ; CHECK: [[MVE_VORR4:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR3]], [[MVE_VORR3]], 1, [[MVE_VPNOT4]], undef [[MVE_VORR4]]
  ; CHECK: bb.1:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT5:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_1]], 0, $noreg
  ; CHECK: [[MVE_VORR5:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT5]], undef [[MVE_VORR5]]
  ; CHECK: [[MVE_VORR6:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR5]], [[MVE_VORR5]], 0, $noreg, undef [[MVE_VORR6]]
  ; CHECK: [[MVE_VPNOT6:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT5]], 0, $noreg
  ; CHECK: [[MVE_VORR7:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR6]], [[MVE_VORR6]], 1, [[MVE_VPNOT6]], undef [[MVE_VORR7]]
  ; CHECK: [[MVE_VORR8:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR7]], [[MVE_VORR7]], 0, $noreg, undef [[MVE_VORR8]]
  ; CHECK: [[MVE_VPNOT7:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT6]], 0, $noreg
  ; CHECK: [[MVE_VORR9:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR8]], [[MVE_VORR8]], 1, [[MVE_VPNOT7]], undef [[MVE_VORR9]]
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x80000000)
  ; CHECK: [[MVE_VCMPs32_2:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT8:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_2]], 0, $noreg
  ; CHECK: [[MVE_VORR10:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT8]], undef [[MVE_VORR10]]
  ; CHECK: [[MVE_VORR11:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR10]], [[MVE_VORR10]], 1, [[MVE_VPNOT8]], undef [[MVE_VORR11]]
  ; CHECK: [[MVE_VPNOT9:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT8]], 0, $noreg
  ; CHECK: [[MVE_VORR12:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR11]], [[MVE_VORR11]], 1, [[MVE_VPNOT9]], undef [[MVE_VORR12]]
  ; CHECK: [[MVE_VORR13:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR12]], [[MVE_VORR12]], 1, [[MVE_VPNOT9]], undef [[MVE_VORR13]]
  ; CHECK: [[MVE_VPNOT10:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT9]], 0, $noreg
  ; CHECK: [[MVE_VORR14:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR13]], [[MVE_VORR13]], 1, [[MVE_VPNOT10]], undef [[MVE_VORR14]]
  ; CHECK: [[MVE_VORR15:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR14]], [[MVE_VORR14]], 1, [[MVE_VPNOT10]], undef [[MVE_VORR15]]
  ; CHECK: [[MVE_VPNOT11:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT10]], 0, $noreg
  ; CHECK: [[MVE_VORR16:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR15]], [[MVE_VORR15]], 1, [[MVE_VPNOT11]], undef [[MVE_VORR16]]
  ; CHECK: [[MVE_VORR17:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR16]], [[MVE_VORR16]], 1, [[MVE_VPNOT11]], undef [[MVE_VORR17]]
  ; CHECK: bb.3:
  ; CHECK: successors: %bb.4(0x80000000)
  ; CHECK: [[MVE_VCMPs32_3:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT12:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_3]], 0, $noreg
  ; CHECK: [[MVE_VORR18:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT12]], undef [[MVE_VORR11]]
  ; CHECK: [[MVE_VPNOT13:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT12]], 0, $noreg
  ; CHECK: [[MVE_VORR19:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT13]], undef [[MVE_VORR19]]
  ; CHECK: bb.4:
  ; CHECK: [[VMSR_P0_:%[0-9]+]]:vccr = VMSR_P0 killed %32:gpr, 14 /* CC::al */, $noreg
  ; CHECK: [[MVE_VPNOT14:%[0-9]+]]:vccr = MVE_VPNOT [[VMSR_P0_]], 0, $noreg
  ; CHECK: [[MVE_VORR20:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR19]], [[MVE_VORR19]], 1, [[MVE_VPNOT14]], undef [[MVE_VORR20]]
  ; CHECK: [[MVE_VPNOT15:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT14]], 0, $noreg
  ; CHECK: [[MVE_VORR21:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR20]], [[MVE_VORR20]], 1, [[MVE_VPNOT15]], undef [[MVE_VORR21]]
  ; CHECK: [[MVE_VPNOT16:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT15]], 0, $noreg
  ; CHECK: [[MVE_VORR22:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR21]], [[MVE_VORR21]], 1, [[MVE_VPNOT16]], undef [[MVE_VORR22]]
  ; CHECK: [[MVE_VPNOT17:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT16]], 0, $noreg
  ; CHECK: [[MVE_VORR23:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR22]], [[MVE_VORR22]], 1, [[MVE_VPNOT17]], undef [[MVE_VORR23]]
  ; CHECK: [[MVE_VPNOT18:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT17]], 0, $noreg
  ; CHECK: [[MVE_VORR24:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR23]], [[MVE_VORR23]], 1, [[MVE_VPNOT18]], undef [[MVE_VORR24]]
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  bb.0:
    ;
    ; Basic test case
    ;
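    ; (There is only one MVE predicate register, so keeping both the VCMP result
    ; and its VPNOT live across all of these uses would force a spill; as the
    ; CHECK lines show, the pass instead recomputes the needed value with a fresh
    ; VPNOT of the previous one before each use.)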
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VPNOT %2:vccr, 0, $noreg
    %4:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %3:vccr, undef %4:mqpr
    %5:mqpr = MVE_VORR %4:mqpr, %4:mqpr, 1, %2:vccr, undef %5:mqpr
    %6:mqpr = MVE_VORR %5:mqpr, %5:mqpr, 1, %3:vccr, undef %6:mqpr
    %7:mqpr = MVE_VORR %6:mqpr, %6:mqpr, 1, %2:vccr, undef %7:mqpr
    %8:mqpr = MVE_VORR %7:mqpr, %7:mqpr, 1, %3:vccr, undef %8:mqpr
  bb.1:
    ;
    ; Tests that unpredicated instructions in the middle of the block
    ; don't interfere with the replacement.
    ;
    %9:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %10:vccr = MVE_VPNOT %9:vccr, 0, $noreg
    %11:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %10:vccr, undef %11:mqpr
    %12:mqpr = MVE_VORR %11:mqpr, %11:mqpr, 0, $noreg, undef %12:mqpr
    %13:mqpr = MVE_VORR %12:mqpr, %12:mqpr, 1, %9:vccr, undef %13:mqpr
    %14:mqpr = MVE_VORR %13:mqpr, %13:mqpr, 0, $noreg, undef %14:mqpr
    %15:mqpr = MVE_VORR %14:mqpr, %14:mqpr, 1, %10:vccr, undef %15:mqpr
  bb.2:
    ;
    ; Tests that all uses of the register are replaced, even when it's used
    ; multiple times in a row.
    ;
    %16:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %17:vccr = MVE_VPNOT %16:vccr, 0, $noreg
    %18:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %17:vccr, undef %18:mqpr
    %19:mqpr = MVE_VORR %18:mqpr, %18:mqpr, 1, %17:vccr, undef %19:mqpr
    %20:mqpr = MVE_VORR %19:mqpr, %19:mqpr, 1, %16:vccr, undef %20:mqpr
    %21:mqpr = MVE_VORR %20:mqpr, %20:mqpr, 1, %16:vccr, undef %21:mqpr
    %22:mqpr = MVE_VORR %21:mqpr, %21:mqpr, 1, %17:vccr, undef %22:mqpr
    %23:mqpr = MVE_VORR %22:mqpr, %22:mqpr, 1, %17:vccr, undef %23:mqpr
    %24:mqpr = MVE_VORR %23:mqpr, %23:mqpr, 1, %16:vccr, undef %24:mqpr
    %25:mqpr = MVE_VORR %24:mqpr, %24:mqpr, 1, %16:vccr, undef %25:mqpr
  bb.3:
    ;
    ; Tests that already present VPNOTs are "registered" by the pass so
    ; it does not insert a useless VPNOT.
    ;
    %26:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %27:vccr = MVE_VPNOT %26:vccr, 0, $noreg
    %28:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %27:vccr, undef %19:mqpr
    %29:vccr = MVE_VPNOT %27:vccr, 0, $noreg
    %30:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %26:vccr, undef %30:mqpr
  bb.4:
    ;
    ; Tests that the pass works with instructions other than vcmp.
    ;
    %32:vccr = VMSR_P0 killed %31:gpr, 14, $noreg
    %33:vccr = MVE_VPNOT %32:vccr, 0, $noreg
    %34:mqpr = MVE_VORR %30:mqpr, %30:mqpr, 1, %33:vccr, undef %34:mqpr
    %35:mqpr = MVE_VORR %34:mqpr, %34:mqpr, 1, %32:vccr, undef %35:mqpr
    %36:mqpr = MVE_VORR %35:mqpr, %35:mqpr, 1, %33:vccr, undef %36:mqpr
    %37:mqpr = MVE_VORR %36:mqpr, %36:mqpr, 1, %32:vccr, undef %37:mqpr
    %38:mqpr = MVE_VORR %37:mqpr, %37:mqpr, 1, %33:vccr, undef %38:mqpr
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: spill_prevention_multi
alignment: 4
body: |
  bb.0:
    ;
    ; Tests that multiple groups of predicated instructions in the same basic block are optimized.
    ;
    ; CHECK-LABEL: name: spill_prevention_multi
    ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
    ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
    ; CHECK: [[MVE_VORR:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT]], undef [[MVE_VORR]]
    ; CHECK: [[MVE_VPNOT1:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT]], 0, $noreg
    ; CHECK: [[MVE_VORR1:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR]], [[MVE_VORR]], 1, [[MVE_VPNOT1]], undef [[MVE_VORR1]]
    ; CHECK: [[MVE_VPNOT2:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT1]], 0, $noreg
    ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR1]], [[MVE_VORR1]], 1, [[MVE_VPNOT2]], undef [[MVE_VORR2]]
    ; CHECK: [[MVE_VPNOT3:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT2]], 0, $noreg
    ; CHECK: [[MVE_VORR3:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR2]], [[MVE_VORR2]], 1, [[MVE_VPNOT3]], undef [[MVE_VORR3]]
    ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
    ; CHECK: [[MVE_VORR4:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VCMPs32_1]], undef [[MVE_VORR4]]
    ; CHECK: [[MVE_VPNOT4:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_1]], 0, $noreg
    ; CHECK: [[MVE_VORR5:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR4]], [[MVE_VORR4]], 1, [[MVE_VPNOT4]], undef [[MVE_VORR5]]
    ; CHECK: [[MVE_VPNOT5:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT4]], 0, $noreg
    ; CHECK: [[MVE_VORR6:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR5]], [[MVE_VORR5]], 1, [[MVE_VPNOT5]], undef [[MVE_VORR6]]
    ; CHECK: [[MVE_VPNOT6:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT5]], 0, $noreg
    ; CHECK: [[MVE_VORR7:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR6]], [[MVE_VORR6]], 1, [[MVE_VPNOT6]], undef [[MVE_VORR7]]
    ; CHECK: [[MVE_VCMPs32_2:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
    ; CHECK: [[MVE_VPNOT7:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_2]], 0, $noreg
    ; CHECK: [[MVE_VORR8:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT7]], undef [[MVE_VORR8]]
    ; CHECK: [[MVE_VPNOT8:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT7]], 0, $noreg
    ; CHECK: [[MVE_VORR9:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR8]], [[MVE_VORR8]], 1, [[MVE_VPNOT8]], undef [[MVE_VORR9]]
    ; CHECK: [[MVE_VPNOT9:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT8]], 0, $noreg
    ; CHECK: [[MVE_VORR10:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR9]], [[MVE_VORR9]], 1, [[MVE_VPNOT9]], undef [[MVE_VORR10]]
    ; CHECK: [[MVE_VPNOT10:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT9]], 0, $noreg
    ; CHECK: [[MVE_VORR11:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR10]], [[MVE_VORR10]], 1, [[MVE_VPNOT10]], undef [[MVE_VORR11]]
    ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VPNOT %2:vccr, 0, $noreg
    %4:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %3:vccr, undef %4:mqpr
    %5:mqpr = MVE_VORR %4:mqpr, %4:mqpr, 1, %2:vccr, undef %5:mqpr
    %6:mqpr = MVE_VORR %5:mqpr, %5:mqpr, 1, %3:vccr, undef %6:mqpr
    %7:mqpr = MVE_VORR %6:mqpr, %6:mqpr, 1, %2:vccr, undef %7:mqpr
    %8:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %9:vccr = MVE_VPNOT %8:vccr, 0, $noreg
    %10:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %8:vccr, undef %10:mqpr
    %11:mqpr = MVE_VORR %10:mqpr, %10:mqpr, 1, %9:vccr, undef %11:mqpr
    %12:mqpr = MVE_VORR %11:mqpr, %11:mqpr, 1, %8:vccr, undef %12:mqpr
    %13:mqpr = MVE_VORR %12:mqpr, %12:mqpr, 1, %9:vccr, undef %13:mqpr
    %14:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %15:vccr = MVE_VPNOT %14:vccr, 0, $noreg
    %16:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %15:vccr, undef %16:mqpr
    %17:mqpr = MVE_VORR %16:mqpr, %16:mqpr, 1, %14:vccr, undef %17:mqpr
    %18:mqpr = MVE_VORR %17:mqpr, %17:mqpr, 1, %15:vccr, undef %18:mqpr
    %19:mqpr = MVE_VORR %18:mqpr, %18:mqpr, 1, %14:vccr, undef %19:mqpr
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: spill_prevention_predicated_vpnots
alignment: 4
body: |
  ; CHECK-LABEL: name: spill_prevention_predicated_vpnots
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 1, [[MVE_VCMPs32_]]
  ; CHECK: [[MVE_VORR:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VCMPs32_]], undef [[MVE_VORR]]
  ; CHECK: [[MVE_VORR1:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR]], [[MVE_VORR]], 1, [[MVE_VPNOT]], undef [[MVE_VORR1]]
  ; CHECK: bb.1:
  ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT1:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_1]], 1, [[MVE_VCMPs32_1]]
  ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %2:mqpr, 1, [[MVE_VPNOT1]], undef [[MVE_VORR2]]
  ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VCMPs32_1]], undef [[MVE_VORR2]]
  ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR %2:mqpr, %1:mqpr, 1, [[MVE_VPNOT1]], undef [[MVE_VORR2]]
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  ;
  ; Tests that predicated VPNOTs are not considered by this pass
  ; (This means that these examples should not be optimized.)
  ;
  bb.0:
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VPNOT %2:vccr, 1, %2:vccr
    %4:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %2:vccr, undef %4:mqpr
    %5:mqpr = MVE_VORR %4:mqpr, %4:mqpr, 1, %3:vccr, undef %5:mqpr
  bb.1:
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VPNOT %2:vccr, 1, %2:vccr
    %4:mqpr = MVE_VORR %0:mqpr, %1:mqpr, 1, %3:vccr, undef %4:mqpr
    %5:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %2:vccr, undef %5:mqpr
    %6:mqpr = MVE_VORR %1:mqpr, %0:mqpr, 1, %3:vccr, undef %6:mqpr
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: spill_prevention_copies
alignment: 4
body: |
  ;
  ; Tests that VPNOTs are replaced by a COPY instead of inserting a VPNOT
  ; (which would result in a double VPNOT).
  ;
  bb.0:
    ; CHECK-LABEL: name: spill_prevention_copies
    ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
    ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
    ; CHECK: [[MVE_VORR:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT]], undef [[MVE_VORR]]
    ; CHECK: [[MVE_VORR1:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT]], undef [[MVE_VORR1]]
    ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT]], undef [[MVE_VORR2]]
    ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VPNOT %2:vccr, 0, $noreg
    %4:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %3:vccr, undef %4:mqpr
    %5:vccr = MVE_VPNOT %2:vccr, 0, $noreg
    %6:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %5:vccr, undef %6:mqpr
    %7:vccr = MVE_VPNOT %2:vccr, 0, $noreg
    %8:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %7:vccr, undef %8:mqpr
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: spill_prevention_vpnot_reordering
alignment: 4
body: |
  ; CHECK-LABEL: name: spill_prevention_vpnot_reordering
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VORR:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %2:mqpr, 1, [[MVE_VCMPs32_]], undef [[MVE_VORR]]
  ; CHECK: [[MVE_VORR1:%[0-9]+]]:mqpr = MVE_VORR %2:mqpr, %1:mqpr, 1, [[MVE_VCMPs32_]], undef [[MVE_VORR1]]
  ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
  ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR]], [[MVE_VORR1]], 1, [[MVE_VPNOT]], undef [[MVE_VORR2]]
  ; CHECK: bb.1:
  ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VORR3:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %2:mqpr, 1, [[MVE_VCMPs32_1]], undef [[MVE_VORR3]]
  ; CHECK: [[MVE_VORR4:%[0-9]+]]:mqpr = MVE_VORR %2:mqpr, %1:mqpr, 1, [[MVE_VCMPs32_1]], undef [[MVE_VORR4]]
  ; CHECK: [[MVE_VPNOT1:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_1]], 0, $noreg
  ; CHECK: [[MVE_VORR5:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR3]], [[MVE_VORR4]], 1, [[MVE_VPNOT1]], undef [[MVE_VORR5]]
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit %1:mqpr
  ;
  ; Tests that the first VPNOT is moved down when the result of the VCMP is used
  ; before the first usage of the VPNOT's result.
  ;
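  ; (Sinking the VPNOT below the last use of the VCMP result keeps only one VCCR
  ; value live at a time, which is what the CHECK lines above verify.)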
  bb.0:
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VPNOT %2:vccr, 0, $noreg
    %4:mqpr = MVE_VORR %0:mqpr, %1:mqpr, 1, %2:vccr, undef %4:mqpr
    %5:mqpr = MVE_VORR %1:mqpr, %0:mqpr, 1, %2:vccr, undef %5:mqpr
    %6:mqpr = MVE_VORR %4:mqpr, %5:mqpr, 1, %3:vccr, undef %6:mqpr
  bb.1:
    ; Test again with a "killed" flag to check if it's properly removed.
    %7:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %8:vccr = MVE_VPNOT %7:vccr, 0, $noreg
    %9:mqpr = MVE_VORR %0:mqpr, %1:mqpr, 1, %7:vccr, undef %9:mqpr
    %10:mqpr = MVE_VORR %1:mqpr, %0:mqpr, 1, killed %7:vccr, undef %10:mqpr
    %11:mqpr = MVE_VORR %9:mqpr, %10:mqpr, 1, %8:vccr, undef %11:mqpr
    tBX_RET 14, $noreg, implicit %0:mqpr
...
---
name: spill_prevention_stop_after_write
alignment: 4
body: |
  ; CHECK-LABEL: name: spill_prevention_stop_after_write
  ; CHECK: bb.0:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: [[MVE_VCMPs32_:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_]], 0, $noreg
  ; CHECK: [[MVE_VORR:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT]], undef [[MVE_VORR]]
  ; CHECK: [[MVE_VPNOT1:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT]], 0, $noreg
  ; CHECK: [[MVE_VORR1:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR]], [[MVE_VORR]], 1, [[MVE_VPNOT1]], undef [[MVE_VORR1]]
  ; CHECK: [[VMSR_P0_:%[0-9]+]]:vccr = VMSR_P0 killed %7:gpr, 14 /* CC::al */, $noreg
  ; CHECK: [[MVE_VORR2:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR1]], [[MVE_VORR1]], 1, [[MVE_VCMPs32_]], undef [[MVE_VORR2]]
  ; CHECK: [[MVE_VORR3:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR2]], [[MVE_VORR2]], 1, [[MVE_VPNOT]], undef [[MVE_VORR3]]
  ; CHECK: bb.1:
  ; CHECK: [[MVE_VCMPs32_1:%[0-9]+]]:vccr = MVE_VCMPs32 %1:mqpr, %2:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VPNOT2:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VCMPs32_1]], 0, $noreg
  ; CHECK: [[MVE_VORR4:%[0-9]+]]:mqpr = MVE_VORR %1:mqpr, %1:mqpr, 1, [[MVE_VPNOT2]], undef [[MVE_VORR]]
  ; CHECK: [[MVE_VPNOT3:%[0-9]+]]:vccr = MVE_VPNOT [[MVE_VPNOT2]], 0, $noreg
  ; CHECK: [[MVE_VORR5:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR4]], [[MVE_VORR4]], 1, [[MVE_VPNOT3]], undef [[MVE_VORR5]]
  ; CHECK: [[MVE_VCMPs32_2:%[0-9]+]]:vccr = MVE_VCMPs32 %2:mqpr, %1:mqpr, 10, 0, $noreg
  ; CHECK: [[MVE_VORR6:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR5]], [[MVE_VORR5]], 1, [[MVE_VPNOT2]], undef [[MVE_VORR6]]
  ; CHECK: [[MVE_VORR7:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR6]], [[MVE_VORR6]], 1, [[MVE_VCMPs32_1]], undef [[MVE_VORR7]]
  ; CHECK: [[MVE_VORR8:%[0-9]+]]:mqpr = MVE_VORR [[MVE_VORR7]], [[MVE_VORR7]], 1, [[MVE_VPNOT2]], undef [[MVE_VORR8]]
  ;
  ; Tests that the optimisation stops when it sees an instruction that writes
  ; to VPR and that doesn't use any of the registers we care about.
  ;
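  ; (VMSR_P0 writes the whole predicate register from a GPR, so past that point
  ; the previously tracked VCCR values can no longer be reused.)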
  bb.0:
    %2:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %3:vccr = MVE_VPNOT %2:vccr, 0, $noreg
    %4:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %3:vccr, undef %4:mqpr
    %5:mqpr = MVE_VORR %4:mqpr, %4:mqpr, 1, %2:vccr, undef %5:mqpr
    %6:vccr = VMSR_P0 killed %20:gpr, 14, $noreg
    %7:mqpr = MVE_VORR %5:mqpr, %5:mqpr, 1, %2:vccr, undef %7:mqpr
    %8:mqpr = MVE_VORR %7:mqpr, %7:mqpr, 1, %3:vccr, undef %8:mqpr
  bb.1:
    %9:vccr = MVE_VCMPs32 %0:mqpr, %1:mqpr, 10, 0, $noreg
    %10:vccr = MVE_VPNOT %9:vccr, 0, $noreg
    %11:mqpr = MVE_VORR %0:mqpr, %0:mqpr, 1, %10:vccr, undef %4:mqpr
    %12:mqpr = MVE_VORR %11:mqpr, %11:mqpr, 1, %9:vccr, undef %12:mqpr
    %13:vccr = MVE_VCMPs32 %1:mqpr, %0:mqpr, 10, 0, $noreg
    %14:mqpr = MVE_VORR %12:mqpr, %12:mqpr, 1, %10:vccr, undef %14:mqpr
    %15:mqpr = MVE_VORR %14:mqpr, %14:mqpr, 1, %9:vccr, undef %15:mqpr
    %16:mqpr = MVE_VORR %15:mqpr, %15:mqpr, 1, %10:vccr, undef %16:mqpr
...