[AArch64][SVE] Use SIMD variant of INSR when scalar is the result of a vector extract

At the intrinsic layer the sve.insr operation takes a scalar. When this
scalar is an integer we force a potentially costly data transfer between
the GPRs and the ZPRs.

Often the integer scalar is the result of a vector extract, for example
when performing a reduction. In such cases all data should be kept
within the ZPRs, as in the sketch below.
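For illustration, a minimal sketch of the pattern this change targets
(the pre-change assembly below is an assumption of typical codegen, not
taken from this patch):

    %t0 = extractelement <vscale x 4 x i32> %b, i64 0
    %t1 = call <vscale x 4 x i32> @llvm.aarch64.sve.insr.nxv4i32(<vscale x 4 x i32> %a, i32 %t0)

Lane 0 of %b already sits in the low bits of a Z register (visible as
s1), yet this previously selected to something like:

    fmov w8, s1      // move the extracted lane out to a GPR
    insr z0.s, w8    // GPR variant of INSR moves it straight back

whereas the SIMD&FP variant of INSR needs no round trip:

    insr z0.s, s1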

Co-authored-by: Paul Walker <paul.walker@arm.com>

Differential Revision: https://reviews.llvm.org/D101169
Bradley Smith 2021-04-23 16:34:26 +01:00
parent 89085bcc86
commit 354604a2a7
2 changed files with 74 additions and 10 deletions


@@ -1292,8 +1292,8 @@ multiclass sve_int_perm_insrs<string asm, SDPatternOperator op> {
 }
 
 class sve_int_perm_insrv<bits<2> sz8_64, string asm, ZPRRegOp zprty,
-                         RegisterClass srcRegType>
-: I<(outs zprty:$Zdn), (ins zprty:$_Zdn, srcRegType:$Vm),
+                         FPRasZPROperand srcOpType>
+: I<(outs zprty:$Zdn), (ins zprty:$_Zdn, srcOpType:$Vm),
   asm, "\t$Zdn, $Vm",
   "",
   []>, Sched<[]> {
@@ -1310,16 +1310,31 @@ class sve_int_perm_insrv<bits<2> sz8_64, string asm, ZPRRegOp zprty,
 }
 
 multiclass sve_int_perm_insrv<string asm, SDPatternOperator op> {
-  def _B : sve_int_perm_insrv<0b00, asm, ZPR8, FPR8>;
-  def _H : sve_int_perm_insrv<0b01, asm, ZPR16, FPR16>;
-  def _S : sve_int_perm_insrv<0b10, asm, ZPR32, FPR32>;
-  def _D : sve_int_perm_insrv<0b11, asm, ZPR64, FPR64>;
+  def _B : sve_int_perm_insrv<0b00, asm, ZPR8, FPR8asZPR>;
+  def _H : sve_int_perm_insrv<0b01, asm, ZPR16, FPR16asZPR>;
+  def _S : sve_int_perm_insrv<0b10, asm, ZPR32, FPR32asZPR>;
+  def _D : sve_int_perm_insrv<0b11, asm, ZPR64, FPR64asZPR>;
 
-  def : SVE_2_Op_Pat<nxv8f16, op, nxv8f16, f16, !cast<Instruction>(NAME # _H)>;
-  def : SVE_2_Op_Pat<nxv4f32, op, nxv4f32, f32, !cast<Instruction>(NAME # _S)>;
-  def : SVE_2_Op_Pat<nxv2f64, op, nxv2f64, f64, !cast<Instruction>(NAME # _D)>;
+  def : Pat<(nxv8f16 (op nxv8f16:$Zn, f16:$Vm)),
+            (!cast<Instruction>(NAME # _H) $Zn, (INSERT_SUBREG (IMPLICIT_DEF), $Vm, hsub))>;
+  def : Pat<(nxv4f32 (op nxv4f32:$Zn, f32:$Vm)),
+            (!cast<Instruction>(NAME # _S) $Zn, (INSERT_SUBREG (IMPLICIT_DEF), $Vm, ssub))>;
+  def : Pat<(nxv2f64 (op nxv2f64:$Zn, f64:$Vm)),
+            (!cast<Instruction>(NAME # _D) $Zn, (INSERT_SUBREG (IMPLICIT_DEF), $Vm, dsub))>;
 
-  def : SVE_2_Op_Pat<nxv8bf16, op, nxv8bf16, bf16, !cast<Instruction>(NAME # _H)>;
+  def : Pat<(nxv8bf16 (op nxv8bf16:$Zn, bf16:$Vm)),
+            (!cast<Instruction>(NAME # _H) $Zn, (INSERT_SUBREG (IMPLICIT_DEF), $Vm, hsub))>;
+
+  // Keep integer insertions within the vector unit.
+  def : Pat<(nxv16i8 (op (nxv16i8 ZPR:$Zn), (i32 (vector_extract (nxv16i8 ZPR:$Vm), 0)))),
+            (!cast<Instruction>(NAME # _B) $Zn, ZPR:$Vm)>;
+  def : Pat<(nxv8i16 (op (nxv8i16 ZPR:$Zn), (i32 (vector_extract (nxv8i16 ZPR:$Vm), 0)))),
+            (!cast<Instruction>(NAME # _H) $Zn, ZPR:$Vm)>;
+  def : Pat<(nxv4i32 (op (nxv4i32 ZPR:$Zn), (i32 (vector_extract (nxv4i32 ZPR:$Vm), 0)))),
+            (!cast<Instruction>(NAME # _S) $Zn, ZPR:$Vm)>;
+  def : Pat<(nxv2i64 (op (nxv2i64 ZPR:$Zn), (i64 (vector_extract (nxv2i64 ZPR:$Vm), 0)))),
+            (!cast<Instruction>(NAME # _D) $Zn, ZPR:$Vm)>;
 }
 
 //===----------------------------------------------------------------------===//
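A rough sketch of what the new floating-point patterns expand to during
instruction selection (pseudo-MIR; register names are illustrative and
the INSR_ZV_* opcode naming is an assumption based on the usual
instantiation of this multiclass):

    %undef:zpr = IMPLICIT_DEF                               // whole vector undefined
    %vec:zpr   = INSERT_SUBREG %undef, %scalar:fpr32, ssub  // scalar placed in lane 0
    %res:zpr   = INSR_ZV_S %acc, %vec                       // SIMD&FP INSR, no GPR involved

The integer patterns are more direct still: when the scalar operand is a
vector_extract of lane 0, the source Z register is passed through as the
Vm operand unchanged, so the extract disappears entirely.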


@@ -0,0 +1,49 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu < %s | FileCheck %s
+
+define <vscale x 16 x i8> @insr_zpr_only_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
+; CHECK-LABEL: insr_zpr_only_nxv16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    insr z0.b, b1
+; CHECK-NEXT:    ret
+  %t0 = extractelement <vscale x 16 x i8> %b, i64 0
+  %t1 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8> %a, i8 %t0)
+  ret <vscale x 16 x i8> %t1
+}
+
+define <vscale x 8 x i16> @insr_zpr_only_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
+; CHECK-LABEL: insr_zpr_only_nxv8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    insr z0.h, h1
+; CHECK-NEXT:    ret
+  %t0 = extractelement <vscale x 8 x i16> %b, i64 0
+  %t1 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16> %a, i16 %t0)
+  ret <vscale x 8 x i16> %t1
+}
+
+define <vscale x 4 x i32> @insr_zpr_only_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
+; CHECK-LABEL: insr_zpr_only_nxv4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    insr z0.s, s1
+; CHECK-NEXT:    ret
+  %t0 = extractelement <vscale x 4 x i32> %b, i64 0
+  %t1 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.insr.nxv4i32(<vscale x 4 x i32> %a, i32 %t0)
+  ret <vscale x 4 x i32> %t1
+}
+
+define <vscale x 2 x i64> @insr_zpr_only_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
+; CHECK-LABEL: insr_zpr_only_nxv2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    insr z0.d, d1
+; CHECK-NEXT:    ret
+  %t0 = extractelement <vscale x 2 x i64> %b, i64 0
+  %t1 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.insr.nxv2i64(<vscale x 2 x i64> %a, i64 %t0)
+  ret <vscale x 2 x i64> %t1
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8>, i8)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16>, i16)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.insr.nxv4i32(<vscale x 4 x i32>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.insr.nxv2i64(<vscale x 2 x i64>, i64)
+
+attributes #0 = { "target-features"="+sve" }
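For reference, the CHECK lines above can be regenerated after a codegen
change with the script named in the NOTE line, and the test run under
lit (both paths are illustrative, since the test's in-tree location is
not shown in this view):

    llvm/utils/update_llc_test_checks.py path/to/this/test.ll
    build/bin/llvm-lit path/to/this/test.ll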