[AArch64][SVE] Add patterns for integer mla/mls.

We probably want to introduce pseudo-instructions at some point, like we have
for binary operations, but this seems okay for now. One thing I'm not sure
about is whether we should be doing this as a DAGCombine instead of directly
pattern-matching it. I don't see any big downside to doing it this way, though.

Differential Revision: https://reviews.llvm.org/D85681
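Concretely, the new patterns fold a one-use predicated multiply into the
add/sub that consumes it, so mul + add becomes a single mla and mul + sub
becomes mls. A minimal sketch of the effect, mirroring the mla_i8 test added
below (the register assignment is illustrative):

  define <vscale x 16 x i8> @mla_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
    %prod = mul <vscale x 16 x i8> %a, %b
    %res = add <vscale x 16 x i8> %c, %prod   ; the mul's only user
    ret <vscale x 16 x i8> %res
  }
  ; now selects:  ptrue p0.b
  ;               mla z2.b, p0/m, z0.b, z1.b  ; z2 += z0 * z1
  ;               mov z0.d, z2.d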
commit be944c85f3 (parent bb18532399)
@@ -233,6 +233,11 @@ def setoeq_or_seteq : PatFrags<(ops node:$lhs, node:$rhs),
 def setone_or_setne : PatFrags<(ops node:$lhs, node:$rhs),
                                [(setone node:$lhs, node:$rhs),
                                 (setne node:$lhs, node:$rhs)]>;
 
+def AArch64mul_p_oneuse : PatFrag<(ops node:$pred, node:$src1, node:$src2),
+                                  (AArch64mul_p node:$pred, node:$src1, node:$src2), [{
+  return N->hasOneUse();
+}]>;
+
 let Predicates = [HasSVE] in {
   defm RDFFR_PPz : sve_int_rdffr_pred<0b0, "rdffr", int_aarch64_sve_rdffr_z>;
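The hasOneUse() check in AArch64mul_p_oneuse is what keeps the fold
profitable: if the multiply has another user it must be materialized anyway,
and also folding it into an mla/mls would just repeat the multiplication. A
small IR sketch of the case the guard rejects, mirroring the mla_i8_multiuse
test added below:

  %prod = mul <vscale x 16 x i8> %a, %b
  store <vscale x 16 x i8> %prod, <vscale x 16 x i8>* %p   ; second user of %prod
  %res = add <vscale x 16 x i8> %c, %prod                  ; stays mul + add, no mla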
@@ -281,8 +286,8 @@ let Predicates = [HasSVE] in {
 
   defm MAD_ZPmZZ : sve_int_mladdsub_vvv_pred<0b0, "mad", int_aarch64_sve_mad>;
   defm MSB_ZPmZZ : sve_int_mladdsub_vvv_pred<0b1, "msb", int_aarch64_sve_msb>;
-  defm MLA_ZPmZZ : sve_int_mlas_vvv_pred<0b0, "mla", int_aarch64_sve_mla>;
-  defm MLS_ZPmZZ : sve_int_mlas_vvv_pred<0b1, "mls", int_aarch64_sve_mls>;
+  defm MLA_ZPmZZ : sve_int_mlas_vvv_pred<0b0, "mla", int_aarch64_sve_mla, add, AArch64mul_p_oneuse>;
+  defm MLS_ZPmZZ : sve_int_mlas_vvv_pred<0b1, "mls", int_aarch64_sve_mls, sub, AArch64mul_p_oneuse>;
 
   // SVE predicated integer reductions.
   defm SADDV_VPZ : sve_int_reduce_0_saddv<0b000, "saddv", int_aarch64_sve_saddv>;
@@ -2518,7 +2518,8 @@ class sve_int_mlas_vvv_pred<bits<2> sz8_64, bits<1> opc, string asm,
   let ElementSize = zprty.ElementSize;
 }
 
-multiclass sve_int_mlas_vvv_pred<bits<1> opc, string asm, SDPatternOperator op> {
+multiclass sve_int_mlas_vvv_pred<bits<1> opc, string asm, SDPatternOperator op,
+                                 SDPatternOperator outerop, SDPatternOperator mulop> {
   def _B : sve_int_mlas_vvv_pred<0b00, opc, asm, ZPR8>;
   def _H : sve_int_mlas_vvv_pred<0b01, opc, asm, ZPR16>;
   def _S : sve_int_mlas_vvv_pred<0b10, opc, asm, ZPR32>;
@@ -2528,6 +2529,15 @@ multiclass sve_int_mlas_vvv_pred<bits<1> opc, string asm, SDPatternOperator op>
   def : SVE_4_Op_Pat<nxv8i16, op, nxv8i1, nxv8i16, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _H)>;
   def : SVE_4_Op_Pat<nxv4i32, op, nxv4i1, nxv4i32, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _S)>;
   def : SVE_4_Op_Pat<nxv2i64, op, nxv2i1, nxv2i64, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
+
+  def : Pat<(outerop nxv16i8:$Op1, (mulop nxv16i1:$pred, nxv16i8:$Op2, nxv16i8:$Op3)),
+            (!cast<Instruction>(NAME # _B) $pred, $Op1, $Op2, $Op3)>;
+  def : Pat<(outerop nxv8i16:$Op1, (mulop nxv8i1:$pred, nxv8i16:$Op2, nxv8i16:$Op3)),
+            (!cast<Instruction>(NAME # _H) $pred, $Op1, $Op2, $Op3)>;
+  def : Pat<(outerop nxv4i32:$Op1, (mulop nxv4i1:$pred, nxv4i32:$Op2, nxv4i32:$Op3)),
+            (!cast<Instruction>(NAME # _S) $pred, $Op1, $Op2, $Op3)>;
+  def : Pat<(outerop nxv2i64:$Op1, (mulop nxv2i1:$pred, nxv2i64:$Op2, nxv2i64:$Op3)),
+            (!cast<Instruction>(NAME # _D) $pred, $Op1, $Op2, $Op3)>;
 }
 
 //===----------------------------------------------------------------------===//
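Instantiated for MLA_ZPmZZ (outerop = add, mulop = AArch64mul_p_oneuse), the
byte-element pattern above reads roughly as follows. This is just an unrolled
illustration of the TableGen, not additional generated code:

  // add(Op1, one-use predicated mul(Op2, Op3)) ==> mla: Op1 += Op2 * Op3
  def : Pat<(add nxv16i8:$Op1,
                 (AArch64mul_p_oneuse nxv16i1:$pred, nxv16i8:$Op2, nxv16i8:$Op3)),
            (MLA_ZPmZZ_B $pred, $Op1, $Op2, $Op3)>;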
@@ -131,8 +130,7 @@ define <vscale x 16 x i8> @srem_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
 ; CHECK-NEXT: uzp1 z3.h, z4.h, z3.h
 ; CHECK-NEXT: uzp1 z2.b, z3.b, z2.b
 ; CHECK-NEXT: ptrue p0.b
-; CHECK-NEXT: mul z1.b, p0/m, z1.b, z2.b
-; CHECK-NEXT: sub z0.b, z0.b, z1.b
+; CHECK-NEXT: mls z0.b, p0/m, z2.b, z1.b
 ; CHECK-NEXT: ret
   %div = srem <vscale x 16 x i8> %a, %b
   ret <vscale x 16 x i8> %div
@@ -151,8 +150,7 @@ define <vscale x 8 x i16> @srem_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b
 ; CHECK-NEXT: sdiv z3.s, p0/m, z3.s, z4.s
 ; CHECK-NEXT: uzp1 z2.h, z3.h, z2.h
 ; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: mul z1.h, p0/m, z1.h, z2.h
-; CHECK-NEXT: sub z0.h, z0.h, z1.h
+; CHECK-NEXT: mls z0.h, p0/m, z2.h, z1.h
 ; CHECK-NEXT: ret
   %div = srem <vscale x 8 x i16> %a, %b
   ret <vscale x 8 x i16> %div
@@ -164,8 +162,7 @@ define <vscale x 4 x i32> @srem_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b
 ; CHECK-NEXT: ptrue p0.s
 ; CHECK-NEXT: movprfx z2, z0
 ; CHECK-NEXT: sdiv z2.s, p0/m, z2.s, z1.s
-; CHECK-NEXT: mul z1.s, p0/m, z1.s, z2.s
-; CHECK-NEXT: sub z0.s, z0.s, z1.s
+; CHECK-NEXT: mls z0.s, p0/m, z2.s, z1.s
 ; CHECK-NEXT: ret
   %div = srem <vscale x 4 x i32> %a, %b
   ret <vscale x 4 x i32> %div
@@ -177,8 +174,7 @@ define <vscale x 2 x i64> @srem_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
 ; CHECK-NEXT: ptrue p0.d
 ; CHECK-NEXT: movprfx z2, z0
 ; CHECK-NEXT: sdiv z2.d, p0/m, z2.d, z1.d
-; CHECK-NEXT: mul z1.d, p0/m, z1.d, z2.d
-; CHECK-NEXT: sub z0.d, z0.d, z1.d
+; CHECK-NEXT: mls z0.d, p0/m, z2.d, z1.d
 ; CHECK-NEXT: ret
   %div = srem <vscale x 2 x i64> %a, %b
   ret <vscale x 2 x i64> %div
@@ -315,8 +311,7 @@ define <vscale x 16 x i8> @urem_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
 ; CHECK-NEXT: uzp1 z3.h, z4.h, z3.h
 ; CHECK-NEXT: uzp1 z2.b, z3.b, z2.b
 ; CHECK-NEXT: ptrue p0.b
-; CHECK-NEXT: mul z1.b, p0/m, z1.b, z2.b
-; CHECK-NEXT: sub z0.b, z0.b, z1.b
+; CHECK-NEXT: mls z0.b, p0/m, z2.b, z1.b
 ; CHECK-NEXT: ret
   %div = urem <vscale x 16 x i8> %a, %b
   ret <vscale x 16 x i8> %div
@@ -335,8 +330,7 @@ define <vscale x 8 x i16> @urem_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b
 ; CHECK-NEXT: udiv z3.s, p0/m, z3.s, z4.s
 ; CHECK-NEXT: uzp1 z2.h, z3.h, z2.h
 ; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: mul z1.h, p0/m, z1.h, z2.h
-; CHECK-NEXT: sub z0.h, z0.h, z1.h
+; CHECK-NEXT: mls z0.h, p0/m, z2.h, z1.h
 ; CHECK-NEXT: ret
   %div = urem <vscale x 8 x i16> %a, %b
   ret <vscale x 8 x i16> %div
@@ -348,8 +342,7 @@ define <vscale x 4 x i32> @urem_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b
 ; CHECK-NEXT: ptrue p0.s
 ; CHECK-NEXT: movprfx z2, z0
 ; CHECK-NEXT: udiv z2.s, p0/m, z2.s, z1.s
-; CHECK-NEXT: mul z1.s, p0/m, z1.s, z2.s
-; CHECK-NEXT: sub z0.s, z0.s, z1.s
+; CHECK-NEXT: mls z0.s, p0/m, z2.s, z1.s
 ; CHECK-NEXT: ret
   %div = urem <vscale x 4 x i32> %a, %b
   ret <vscale x 4 x i32> %div
@@ -361,8 +354,7 @@ define <vscale x 2 x i64> @urem_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
 ; CHECK-NEXT: ptrue p0.d
 ; CHECK-NEXT: movprfx z2, z0
 ; CHECK-NEXT: udiv z2.d, p0/m, z2.d, z1.d
-; CHECK-NEXT: mul z1.d, p0/m, z1.d, z2.d
-; CHECK-NEXT: sub z0.d, z0.d, z1.d
+; CHECK-NEXT: mls z0.d, p0/m, z2.d, z1.d
 ; CHECK-NEXT: ret
   %div = urem <vscale x 2 x i64> %a, %b
   ret <vscale x 2 x i64> %div
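All eight srem/urem diffs above are the same improvement: scalable-vector
remainder is expanded as a - (a / b) * b, and because the multiply's only
user is the final subtract, the mul + sub pair now folds into one mls.
Schematically, for the i32 case (register names as in the test above):

  ; before:  mul z1.s, p0/m, z1.s, z2.s
  ;          sub z0.s, z0.s, z1.s
  ; after:   mls z0.s, p0/m, z2.s, z1.s   ; z0 -= z2 * z1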
@@ -105,10 +105,11 @@ define <vscale x 2 x <vscale x 2 x i64>*> @scalable_of_scalable_1(<vscale x 2 x
 ; CHECK-LABEL: scalable_of_scalable_1:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: rdvl x8, #1
-; CHECK-NEXT: mov z1.d, x8
+; CHECK-NEXT: mov z1.d, #1 // =0x1
 ; CHECK-NEXT: mov z0.d, x0
-; CHECK-NEXT: mul z1.d, z1.d, #1
-; CHECK-NEXT: add z0.d, z0.d, z1.d
+; CHECK-NEXT: mov z2.d, x8
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mla z0.d, p0/m, z2.d, z1.d
 ; CHECK-NEXT: ret
   %idx = shufflevector <vscale x 2 x i64> insertelement (<vscale x 2 x i64> undef, i64 1, i32 0), <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i32> zeroinitializer
   %d = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, <vscale x 2 x i64> %idx
@@ -119,9 +120,10 @@ define <vscale x 2 x <vscale x 2 x i64>*> @scalable_of_scalable_2(<vscale x 2 x
 ; CHECK-LABEL: scalable_of_scalable_2:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: rdvl x8, #1
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: mul z1.d, z1.d, #1
-; CHECK-NEXT: add z0.d, z0.d, z1.d
+; CHECK-NEXT: mov z1.d, #1 // =0x1
+; CHECK-NEXT: mov z2.d, x8
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mla z0.d, p0/m, z2.d, z1.d
 ; CHECK-NEXT: ret
   %idx = shufflevector <vscale x 2 x i64> insertelement (<vscale x 2 x i64> undef, i64 1, i32 0), <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i32> zeroinitializer
   %d = getelementptr <vscale x 2 x i64>, <vscale x 2 x <vscale x 2 x i64>*> %base, <vscale x 2 x i64> %idx
@@ -135,8 +137,7 @@ define <vscale x 2 x <vscale x 2 x i64>*> @scalable_of_scalable_3(<vscale x 2 x
 ; CHECK-NEXT: rdvl x8, #1
 ; CHECK-NEXT: sxtw z1.d, p0/m, z1.d
 ; CHECK-NEXT: mov z2.d, x8
-; CHECK-NEXT: mul z1.d, p0/m, z1.d, z2.d
-; CHECK-NEXT: add z0.d, z0.d, z1.d
+; CHECK-NEXT: mla z0.d, p0/m, z1.d, z2.d
 ; CHECK-NEXT: ret
   %d = getelementptr <vscale x 2 x i64>, <vscale x 2 x <vscale x 2 x i64>*> %base, <vscale x 2 x i32> %idx
   ret <vscale x 2 x <vscale x 2 x i64>*> %d
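The GEP tests change for the same reason: indexing a scalable type scales the
index by a vscale-dependent element size (the rdvl result), and that multiply
now feeds a single accumulating mla instead of a mul followed by an add. For
scalable_of_scalable_3 above:

  ; before:  mul z1.d, p0/m, z1.d, z2.d
  ;          add z0.d, z0.d, z1.d
  ; after:   mla z0.d, p0/m, z1.d, z2.d   ; z0 += z1 * z2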
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
 ; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t
 
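The remaining churn in this test file is mechanical: the handwritten CHECK
lines are replaced by checks regenerated with the script named in the NOTE
line. A plausible invocation (the build and test paths are assumed here,
since the commit view does not show filenames):

  llvm/utils/update_llc_test_checks.py --llc-binary=<build>/bin/llc <path/to/this/test>.ll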
@@ -5,96 +6,108 @@
 ; WARN-NOT: warning
 
 define <vscale x 2 x i64> @add_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: add_i64
-; CHECK: add z0.d, z0.d, z1.d
+; CHECK-LABEL: add_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %res = add <vscale x 2 x i64> %a, %b
   ret <vscale x 2 x i64> %res
 }
 
 define <vscale x 4 x i32> @add_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: add_i32
-; CHECK: add z0.s, z0.s, z1.s
+; CHECK-LABEL: add_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add z0.s, z0.s, z1.s
 ; CHECK-NEXT: ret
   %res = add <vscale x 4 x i32> %a, %b
   ret <vscale x 4 x i32> %res
 }
 
 define <vscale x 8 x i16> @add_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: add_i16
-; CHECK: add z0.h, z0.h, z1.h
+; CHECK-LABEL: add_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add z0.h, z0.h, z1.h
 ; CHECK-NEXT: ret
   %res = add <vscale x 8 x i16> %a, %b
   ret <vscale x 8 x i16> %res
 }
 
 define <vscale x 16 x i8> @add_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; CHECK-LABEL: add_i8
-; CHECK: add z0.b, z0.b, z1.b
+; CHECK-LABEL: add_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add z0.b, z0.b, z1.b
 ; CHECK-NEXT: ret
   %res = add <vscale x 16 x i8> %a, %b
   ret <vscale x 16 x i8> %res
 }
 
 define <vscale x 2 x i64> @sub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: sub_i64
-; CHECK: sub z0.d, z0.d, z1.d
+; CHECK-LABEL: sub_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %res = sub <vscale x 2 x i64> %a, %b
   ret <vscale x 2 x i64> %res
 }
 
 define <vscale x 4 x i32> @sub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: sub_i32
-; CHECK: sub z0.s, z0.s, z1.s
+; CHECK-LABEL: sub_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub z0.s, z0.s, z1.s
 ; CHECK-NEXT: ret
   %res = sub <vscale x 4 x i32> %a, %b
   ret <vscale x 4 x i32> %res
 }
 
 define <vscale x 8 x i16> @sub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: sub_i16
-; CHECK: sub z0.h, z0.h, z1.h
+; CHECK-LABEL: sub_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub z0.h, z0.h, z1.h
 ; CHECK-NEXT: ret
   %res = sub <vscale x 8 x i16> %a, %b
   ret <vscale x 8 x i16> %res
 }
 
 define <vscale x 16 x i8> @sub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; CHECK-LABEL: sub_i8
-; CHECK: sub z0.b, z0.b, z1.b
+; CHECK-LABEL: sub_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub z0.b, z0.b, z1.b
 ; CHECK-NEXT: ret
   %res = sub <vscale x 16 x i8> %a, %b
   ret <vscale x 16 x i8> %res
 }
 
 define <vscale x 2 x i64> @sqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: sqadd_i64
-; CHECK: sqadd z0.d, z0.d, z1.d
+; CHECK-LABEL: sqadd_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqadd z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %res = call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
   ret <vscale x 2 x i64> %res
 }
 
 define <vscale x 4 x i32> @sqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: sqadd_i32
-; CHECK: sqadd z0.s, z0.s, z1.s
+; CHECK-LABEL: sqadd_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqadd z0.s, z0.s, z1.s
 ; CHECK-NEXT: ret
   %res = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %res
 }
 
 define <vscale x 8 x i16> @sqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: sqadd_i16
-; CHECK: sqadd z0.h, z0.h, z1.h
+; CHECK-LABEL: sqadd_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqadd z0.h, z0.h, z1.h
 ; CHECK-NEXT: ret
   %res = call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
   ret <vscale x 8 x i16> %res
 }
 
 define <vscale x 16 x i8> @sqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; CHECK-LABEL: sqadd_i8
-; CHECK: sqadd z0.b, z0.b, z1.b
+; CHECK-LABEL: sqadd_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqadd z0.b, z0.b, z1.b
 ; CHECK-NEXT: ret
   %res = call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
   ret <vscale x 16 x i8> %res
@@ -102,32 +115,36 @@ define <vscale x 16 x i8> @sqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b
 
 
 define <vscale x 2 x i64> @sqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: sqsub_i64
-; CHECK: sqsub z0.d, z0.d, z1.d
+; CHECK-LABEL: sqsub_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqsub z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %res = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
   ret <vscale x 2 x i64> %res
 }
 
 define <vscale x 4 x i32> @sqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: sqsub_i32
-; CHECK: sqsub z0.s, z0.s, z1.s
+; CHECK-LABEL: sqsub_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqsub z0.s, z0.s, z1.s
 ; CHECK-NEXT: ret
   %res = call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %res
 }
 
 define <vscale x 8 x i16> @sqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: sqsub_i16
-; CHECK: sqsub z0.h, z0.h, z1.h
+; CHECK-LABEL: sqsub_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqsub z0.h, z0.h, z1.h
 ; CHECK-NEXT: ret
   %res = call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
   ret <vscale x 8 x i16> %res
 }
 
 define <vscale x 16 x i8> @sqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; CHECK-LABEL: sqsub_i8
-; CHECK: sqsub z0.b, z0.b, z1.b
+; CHECK-LABEL: sqsub_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sqsub z0.b, z0.b, z1.b
 ; CHECK-NEXT: ret
   %res = call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
   ret <vscale x 16 x i8> %res
@@ -135,32 +152,36 @@ define <vscale x 16 x i8> @sqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b
 
 
 define <vscale x 2 x i64> @uqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: uqadd_i64
-; CHECK: uqadd z0.d, z0.d, z1.d
+; CHECK-LABEL: uqadd_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqadd z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %res = call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
   ret <vscale x 2 x i64> %res
 }
 
 define <vscale x 4 x i32> @uqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: uqadd_i32
-; CHECK: uqadd z0.s, z0.s, z1.s
+; CHECK-LABEL: uqadd_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqadd z0.s, z0.s, z1.s
 ; CHECK-NEXT: ret
   %res = call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %res
 }
 
 define <vscale x 8 x i16> @uqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: uqadd_i16
-; CHECK: uqadd z0.h, z0.h, z1.h
+; CHECK-LABEL: uqadd_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqadd z0.h, z0.h, z1.h
 ; CHECK-NEXT: ret
   %res = call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
   ret <vscale x 8 x i16> %res
 }
 
 define <vscale x 16 x i8> @uqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; CHECK-LABEL: uqadd_i8
-; CHECK: uqadd z0.b, z0.b, z1.b
+; CHECK-LABEL: uqadd_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqadd z0.b, z0.b, z1.b
 ; CHECK-NEXT: ret
   %res = call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
   ret <vscale x 16 x i8> %res
@@ -168,37 +189,79 @@ define <vscale x 16 x i8> @uqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b
 
 
 define <vscale x 2 x i64> @uqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: uqsub_i64
-; CHECK: uqsub z0.d, z0.d, z1.d
+; CHECK-LABEL: uqsub_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqsub z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %res = call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
   ret <vscale x 2 x i64> %res
 }
 
 define <vscale x 4 x i32> @uqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: uqsub_i32
-; CHECK: uqsub z0.s, z0.s, z1.s
+; CHECK-LABEL: uqsub_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqsub z0.s, z0.s, z1.s
 ; CHECK-NEXT: ret
   %res = call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %res
 }
 
 define <vscale x 8 x i16> @uqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: uqsub_i16
-; CHECK: uqsub z0.h, z0.h, z1.h
+; CHECK-LABEL: uqsub_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqsub z0.h, z0.h, z1.h
 ; CHECK-NEXT: ret
   %res = call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
   ret <vscale x 8 x i16> %res
 }
 
 define <vscale x 16 x i8> @uqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; CHECK-LABEL: uqsub_i8
-; CHECK: uqsub z0.b, z0.b, z1.b
+; CHECK-LABEL: uqsub_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uqsub z0.b, z0.b, z1.b
 ; CHECK-NEXT: ret
   %res = call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
   ret <vscale x 16 x i8> %res
 }
 
+define <vscale x 16 x i8> @mla_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+; CHECK-LABEL: mla_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.b
+; CHECK-NEXT: mla z2.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: mov z0.d, z2.d
+; CHECK-NEXT: ret
+  %prod = mul <vscale x 16 x i8> %a, %b
+  %res = add <vscale x 16 x i8> %c, %prod
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @mla_i8_multiuse(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8>* %p) {
+; CHECK-LABEL: mla_i8_multiuse:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.b
+; CHECK-NEXT: mul z1.b, p0/m, z1.b, z0.b
+; CHECK-NEXT: add z0.b, z2.b, z1.b
+; CHECK-NEXT: st1b { z1.b }, p0, [x0]
+; CHECK-NEXT: ret
+  %prod = mul <vscale x 16 x i8> %a, %b
+  store <vscale x 16 x i8> %prod, <vscale x 16 x i8>* %p
+  %res = add <vscale x 16 x i8> %c, %prod
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @mls_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+; CHECK-LABEL: mls_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.b
+; CHECK-NEXT: mls z2.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: mov z0.d, z2.d
+; CHECK-NEXT: ret
+  %prod = mul <vscale x 16 x i8> %a, %b
+  %res = sub <vscale x 16 x i8> %c, %prod
+  ret <vscale x 16 x i8> %res
+}
+
 declare <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
 declare <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
 declare <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)