[ARM] Select MVE fp add and sub

As with integer arithmetic, we can add simple floating-point MVE addition and
subtraction patterns.
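
For illustration only (not part of this patch): a plain v4f32 fadd like the
sketch below is what the new HasMVEFloat patterns match, so with the mve.fp
target feature enabled a single q-register vadd.f32 is selected instead of
four scalar adds. The function name and llc flags here are assumed for the
example; see the updated test for the real RUN lines.

  ; Hypothetical example, compiled with something like:
  ;   llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve.fp -o -
  define arm_aapcs_vfpcc <4 x float> @example_vadd(<4 x float> %a, <4 x float> %b) {
  entry:
    %sum = fadd <4 x float> %a, %b   ; matched by the new MVE_VADDf32 pattern
    ret <4 x float> %sum             ; returned in q0 under arm_aapcs_vfpcc
  }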

Initial code by David Sherwood

Differential Revision: https://reviews.llvm.org/D63257

llvm-svn: 364629
David Green 2019-06-28 07:41:09 +00:00
parent 9a92be1b35
commit 62889b0ea5
2 changed files with 208 additions and 0 deletions


@@ -2310,9 +2310,23 @@ def MVE_VFMSf16 : MVE_VADDSUBFMA_fp<"vfms", "f16", 0b1, 0b1, 0b0, 0b1,
def MVE_VADDf32 : MVE_VADDSUBFMA_fp<"vadd", "f32", 0b0, 0b0, 0b1, 0b0>;
def MVE_VADDf16 : MVE_VADDSUBFMA_fp<"vadd", "f16", 0b1, 0b0, 0b1, 0b0>;
let Predicates = [HasMVEFloat] in {
  def : Pat<(v4f32 (fadd (v4f32 MQPR:$val1), (v4f32 MQPR:$val2))),
            (v4f32 (MVE_VADDf32 (v4f32 MQPR:$val1), (v4f32 MQPR:$val2)))>;
  def : Pat<(v8f16 (fadd (v8f16 MQPR:$val1), (v8f16 MQPR:$val2))),
            (v8f16 (MVE_VADDf16 (v8f16 MQPR:$val1), (v8f16 MQPR:$val2)))>;
}
def MVE_VSUBf32 : MVE_VADDSUBFMA_fp<"vsub", "f32", 0b0, 0b0, 0b1, 0b1>;
def MVE_VSUBf16 : MVE_VADDSUBFMA_fp<"vsub", "f16", 0b1, 0b0, 0b1, 0b1>;
let Predicates = [HasMVEFloat] in {
  def : Pat<(v4f32 (fsub (v4f32 MQPR:$val1), (v4f32 MQPR:$val2))),
            (v4f32 (MVE_VSUBf32 (v4f32 MQPR:$val1), (v4f32 MQPR:$val2)))>;
  def : Pat<(v8f16 (fsub (v8f16 MQPR:$val1), (v8f16 MQPR:$val2))),
            (v8f16 (MVE_VSUBf16 (v8f16 MQPR:$val1), (v8f16 MQPR:$val2)))>;
}
class MVE_VCADD<string suffix, bit size, list<dag> pattern=[]>
  : MVEFloatArithNeon<"vcadd", suffix, size, (outs MQPR:$Qd),
                      (ins MQPR:$Qn, MQPR:$Qm, complexrotateopodd:$rot),


@@ -32,6 +32,103 @@ entry:
  ret <4 x i32> %0
}
define arm_aapcs_vfpcc <4 x float> @add_float32_t(<4 x float> %src1, <4 x float> %src2) {
; CHECK-MVE-LABEL: add_float32_t:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vadd.f32 s8, s4, s0
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vadd.f32 s10, s5, s1
; CHECK-MVE-NEXT: vadd.f32 s12, s6, s2
; CHECK-MVE-NEXT: vadd.f32 s4, s7, s3
; CHECK-MVE-NEXT: vdup.32 q0, r0
; CHECK-MVE-NEXT: vmov.f32 s0, s8
; CHECK-MVE-NEXT: vmov.f32 s1, s10
; CHECK-MVE-NEXT: vmov.f32 s2, s12
; CHECK-MVE-NEXT: vmov.f32 s3, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: add_float32_t:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vadd.f32 q0, q1, q0
; CHECK-MVEFP-NEXT: bx lr
entry:
  %0 = fadd nnan ninf nsz <4 x float> %src2, %src1
  ret <4 x float> %0
}
define arm_aapcs_vfpcc <8 x half> @add_float16_t(<8 x half> %src1, <8 x half> %src2) {
; CHECK-MVE-LABEL: add_float16_t:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vmov.u16 r0, q0[0]
; CHECK-MVE-NEXT: vmov.u16 r1, q1[0]
; CHECK-MVE-NEXT: vmov s10, r1
; CHECK-MVE-NEXT: vmov.u16 r1, q0[1]
; CHECK-MVE-NEXT: vmov s8, r0
; CHECK-MVE-NEXT: movs r2, #0
; CHECK-MVE-NEXT: vadd.f16 s8, s10, s8
; CHECK-MVE-NEXT: vmov r0, s8
; CHECK-MVE-NEXT: vmov s8, r1
; CHECK-MVE-NEXT: vmov.u16 r1, q1[1]
; CHECK-MVE-NEXT: vmov s10, r1
; CHECK-MVE-NEXT: vadd.f16 s8, s10, s8
; CHECK-MVE-NEXT: vmov r1, s8
; CHECK-MVE-NEXT: vdup.16 q2, r2
; CHECK-MVE-NEXT: vmov.16 q2[0], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q0[2]
; CHECK-MVE-NEXT: vmov s12, r0
; CHECK-MVE-NEXT: vmov.u16 r0, q1[2]
; CHECK-MVE-NEXT: vmov s14, r0
; CHECK-MVE-NEXT: vmov.16 q2[1], r1
; CHECK-MVE-NEXT: vadd.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmov r0, s12
; CHECK-MVE-NEXT: vmov.16 q2[2], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q0[3]
; CHECK-MVE-NEXT: vmov s12, r0
; CHECK-MVE-NEXT: vmov.u16 r0, q1[3]
; CHECK-MVE-NEXT: vmov s14, r0
; CHECK-MVE-NEXT: vadd.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmov r0, s12
; CHECK-MVE-NEXT: vmov.16 q2[3], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q0[4]
; CHECK-MVE-NEXT: vmov s12, r0
; CHECK-MVE-NEXT: vmov.u16 r0, q1[4]
; CHECK-MVE-NEXT: vmov s14, r0
; CHECK-MVE-NEXT: vadd.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmov r0, s12
; CHECK-MVE-NEXT: vmov.16 q2[4], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q0[5]
; CHECK-MVE-NEXT: vmov s12, r0
; CHECK-MVE-NEXT: vmov.u16 r0, q1[5]
; CHECK-MVE-NEXT: vmov s14, r0
; CHECK-MVE-NEXT: vadd.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmov r0, s12
; CHECK-MVE-NEXT: vmov.16 q2[5], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q0[6]
; CHECK-MVE-NEXT: vmov s12, r0
; CHECK-MVE-NEXT: vmov.u16 r0, q1[6]
; CHECK-MVE-NEXT: vmov s14, r0
; CHECK-MVE-NEXT: vadd.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmov r0, s12
; CHECK-MVE-NEXT: vmov.16 q2[6], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q0[7]
; CHECK-MVE-NEXT: vmov s0, r0
; CHECK-MVE-NEXT: vmov.u16 r0, q1[7]
; CHECK-MVE-NEXT: vmov s2, r0
; CHECK-MVE-NEXT: vadd.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q2[7], r0
; CHECK-MVE-NEXT: vmov q0, q2
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: add_float16_t:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vadd.f16 q0, q1, q0
; CHECK-MVEFP-NEXT: bx lr
entry:
  %0 = fadd nnan ninf nsz <8 x half> %src2, %src1
  ret <8 x half> %0
}
define arm_aapcs_vfpcc <16 x i8> @sub_int8_t(<16 x i8> %src1, <16 x i8> %src2) {
; CHECK-LABEL: sub_int8_t:
@@ -62,3 +159,100 @@ entry:
  %0 = sub nsw <4 x i32> %src2, %src1
  ret <4 x i32> %0
}
define arm_aapcs_vfpcc <4 x float> @sub_float32_t(<4 x float> %src1, <4 x float> %src2) {
; CHECK-MVE-LABEL: sub_float32_t:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vsub.f32 s8, s4, s0
; CHECK-MVE-NEXT: movs r0, #0
; CHECK-MVE-NEXT: vsub.f32 s10, s5, s1
; CHECK-MVE-NEXT: vsub.f32 s12, s6, s2
; CHECK-MVE-NEXT: vsub.f32 s4, s7, s3
; CHECK-MVE-NEXT: vdup.32 q0, r0
; CHECK-MVE-NEXT: vmov.f32 s0, s8
; CHECK-MVE-NEXT: vmov.f32 s1, s10
; CHECK-MVE-NEXT: vmov.f32 s2, s12
; CHECK-MVE-NEXT: vmov.f32 s3, s4
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: sub_float32_t:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vsub.f32 q0, q1, q0
; CHECK-MVEFP-NEXT: bx lr
entry:
  %0 = fsub nnan ninf nsz <4 x float> %src2, %src1
  ret <4 x float> %0
}
define arm_aapcs_vfpcc <8 x half> @sub_float16_t(<8 x half> %src1, <8 x half> %src2) {
; CHECK-MVE-LABEL: sub_float16_t:
; CHECK-MVE: @ %bb.0: @ %entry
; CHECK-MVE-NEXT: vmov.u16 r0, q0[0]
; CHECK-MVE-NEXT: vmov.u16 r1, q1[0]
; CHECK-MVE-NEXT: vmov s10, r1
; CHECK-MVE-NEXT: vmov.u16 r1, q0[1]
; CHECK-MVE-NEXT: vmov s8, r0
; CHECK-MVE-NEXT: movs r2, #0
; CHECK-MVE-NEXT: vsub.f16 s8, s10, s8
; CHECK-MVE-NEXT: vmov r0, s8
; CHECK-MVE-NEXT: vmov s8, r1
; CHECK-MVE-NEXT: vmov.u16 r1, q1[1]
; CHECK-MVE-NEXT: vmov s10, r1
; CHECK-MVE-NEXT: vsub.f16 s8, s10, s8
; CHECK-MVE-NEXT: vmov r1, s8
; CHECK-MVE-NEXT: vdup.16 q2, r2
; CHECK-MVE-NEXT: vmov.16 q2[0], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q0[2]
; CHECK-MVE-NEXT: vmov s12, r0
; CHECK-MVE-NEXT: vmov.u16 r0, q1[2]
; CHECK-MVE-NEXT: vmov s14, r0
; CHECK-MVE-NEXT: vmov.16 q2[1], r1
; CHECK-MVE-NEXT: vsub.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmov r0, s12
; CHECK-MVE-NEXT: vmov.16 q2[2], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q0[3]
; CHECK-MVE-NEXT: vmov s12, r0
; CHECK-MVE-NEXT: vmov.u16 r0, q1[3]
; CHECK-MVE-NEXT: vmov s14, r0
; CHECK-MVE-NEXT: vsub.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmov r0, s12
; CHECK-MVE-NEXT: vmov.16 q2[3], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q0[4]
; CHECK-MVE-NEXT: vmov s12, r0
; CHECK-MVE-NEXT: vmov.u16 r0, q1[4]
; CHECK-MVE-NEXT: vmov s14, r0
; CHECK-MVE-NEXT: vsub.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmov r0, s12
; CHECK-MVE-NEXT: vmov.16 q2[4], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q0[5]
; CHECK-MVE-NEXT: vmov s12, r0
; CHECK-MVE-NEXT: vmov.u16 r0, q1[5]
; CHECK-MVE-NEXT: vmov s14, r0
; CHECK-MVE-NEXT: vsub.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmov r0, s12
; CHECK-MVE-NEXT: vmov.16 q2[5], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q0[6]
; CHECK-MVE-NEXT: vmov s12, r0
; CHECK-MVE-NEXT: vmov.u16 r0, q1[6]
; CHECK-MVE-NEXT: vmov s14, r0
; CHECK-MVE-NEXT: vsub.f16 s12, s14, s12
; CHECK-MVE-NEXT: vmov r0, s12
; CHECK-MVE-NEXT: vmov.16 q2[6], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q0[7]
; CHECK-MVE-NEXT: vmov s0, r0
; CHECK-MVE-NEXT: vmov.u16 r0, q1[7]
; CHECK-MVE-NEXT: vmov s2, r0
; CHECK-MVE-NEXT: vsub.f16 s0, s2, s0
; CHECK-MVE-NEXT: vmov r0, s0
; CHECK-MVE-NEXT: vmov.16 q2[7], r0
; CHECK-MVE-NEXT: vmov q0, q2
; CHECK-MVE-NEXT: bx lr
;
; CHECK-MVEFP-LABEL: sub_float16_t:
; CHECK-MVEFP: @ %bb.0: @ %entry
; CHECK-MVEFP-NEXT: vsub.f16 q0, q1, q0
; CHECK-MVEFP-NEXT: bx lr
entry:
  %0 = fsub nnan ninf nsz <8 x half> %src2, %src1
  ret <8 x half> %0
}