[AArch64][SVE] Add patterns for logical immediate operations.

Summary:
Add pattern matching for the following SVE logical vector and immediate
instructions:
- and/bic, orr/orn, eor/eon.

Reviewers: sdesmalen, huntergr, rengolin, efriedma, c-rhodes, mgudim, kmclaughlin

Subscribers: tschuett, kristof.beyls, hiraditya, rkruppe, psnobl, llvm-commits, amehsan

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D71483
Commit f933878991 (parent f9a706a36a)
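In IR, each new intrinsic takes the element-level immediate as an i64 operand and is selected directly to the immediate form of the instruction. A minimal usage sketch, consistent with the tests added at the end of this patch (the function name here is illustrative only):

; With -mattr=+sve this call is expected to select to "orr z0.b, z0.b, #0xf"
; rather than first materialising the mask in a vector register.
define <vscale x 16 x i8> @orr_example(<vscale x 16 x i8> %a) {
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.orr.imm.nxv16i8(<vscale x 16 x i8> %a, i64 15)
  ret <vscale x 16 x i8> %res
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.orr.imm.nxv16i8(<vscale x 16 x i8>, i64)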
@@ -1104,6 +1104,12 @@ class AdvSIMD_ScatterStore_VectorBase_Intrinsic
                 ],
                 [IntrWriteMem, IntrArgMemOnly, ImmArg<3>]>;
 
+class AdvSIMD_1VectorArg_Imm64_Intrinsic
+  : Intrinsic<[llvm_anyvector_ty],
+              [LLVMMatchType<0>,
+               llvm_i64_ty],
+              [IntrNoMem, ImmArg<1>]>;
+
 //
 // Loads
 //
@@ -1271,6 +1277,10 @@ def int_aarch64_sve_orns : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_nors : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_nands : AdvSIMD_Pred2VectorArg_Intrinsic;
 
+def int_aarch64_sve_orr_imm : AdvSIMD_1VectorArg_Imm64_Intrinsic;
+def int_aarch64_sve_eor_imm : AdvSIMD_1VectorArg_Imm64_Intrinsic;
+def int_aarch64_sve_and_imm : AdvSIMD_1VectorArg_Imm64_Intrinsic;
+
 //
 // Conversion
 //

@@ -164,6 +164,11 @@ public:
     return SelectSVEAddSubImm(N, VT, Imm, Shift);
   }
 
+  template<MVT::SimpleValueType VT>
+  bool SelectSVELogicalImm(SDValue N, SDValue &Imm) {
+    return SelectSVELogicalImm(N, VT, Imm);
+  }
+
   /// Form sequences of consecutive 64/128-bit registers for use in NEON
   /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
   /// between 1 and 4 elements. If it contains a single element that is returned
@@ -241,6 +246,8 @@ private:
   bool SelectCMP_SWAP(SDNode *N);
 
   bool SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
+
+  bool SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm);
 };
 } // end anonymous namespace
 
@@ -2851,6 +2858,35 @@ bool AArch64DAGToDAGISel::SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift) {
   return false;
 }
 
+bool AArch64DAGToDAGISel::SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm) {
+  if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
+    uint64_t ImmVal = CNode->getZExtValue();
+    SDLoc DL(N);
+
+    // Shift mask depending on type size.
+    switch (VT.SimpleTy) {
+      case MVT::i8:
+        ImmVal &= 0xFF;
+        ImmVal |= (ImmVal << 8);
+      case MVT::i16:
+        ImmVal &= 0xFFFF;
+        ImmVal |= (ImmVal << 16);
+      case MVT::i32:
+        ImmVal &= 0xFFFFFFFF;
+        ImmVal |= (ImmVal << 32);
+        break;
+      default:
+        break;
+    }
+
+    uint64_t encoding;
+    if (AArch64_AM::processLogicalImmediate(ImmVal, 64, encoding)) {
+      Imm = CurDAG->getTargetConstant(encoding, DL, MVT::i64);
+      return true;
+    }
+  }
+  return false;
+}
+
 bool AArch64DAGToDAGISel::trySelectStackSlotTagP(SDNode *N) {
   // tagp(FrameIndex, IRGstack, tag_offset):

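As a worked illustration of what SelectSVELogicalImm does for a narrower element type, consider the i16 value used in the tests below (the function name here is illustrative, not part of the patch): the element immediate 0xfc07 is masked to 16 bits, replicated across all 64 bits to 0xfc07fc07fc07fc07 by the fall-through cases above, and selection succeeds only because processLogicalImmediate accepts that replicated value as a valid 64-bit logical (bitmask) immediate.

define <vscale x 8 x i16> @orr_h_example(<vscale x 8 x i16> %a) {
  ; 64519 == 0xfc07; expected to select to "orr z0.h, z0.h, #0xfc07".
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.orr.imm.nxv8i16(<vscale x 8 x i16> %a, i64 64519)
  ret <vscale x 8 x i16> %res
}

declare <vscale x 8 x i16> @llvm.aarch64.sve.orr.imm.nxv8i16(<vscale x 8 x i16>, i64)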
@@ -117,9 +117,9 @@ let Predicates = [HasSVE] in {
   defm EORV_VPZ : sve_int_reduce_2<0b001, "eorv", AArch64eorv_pred>;
   defm ANDV_VPZ : sve_int_reduce_2<0b010, "andv", AArch64andv_pred>;
 
-  defm ORR_ZI : sve_int_log_imm<0b00, "orr", "orn">;
-  defm EOR_ZI : sve_int_log_imm<0b01, "eor", "eon">;
-  defm AND_ZI : sve_int_log_imm<0b10, "and", "bic">;
+  defm ORR_ZI : sve_int_log_imm<0b00, "orr", "orn", int_aarch64_sve_orr_imm>;
+  defm EOR_ZI : sve_int_log_imm<0b01, "eor", "eon", int_aarch64_sve_eor_imm>;
+  defm AND_ZI : sve_int_log_imm<0b10, "and", "bic", int_aarch64_sve_and_imm>;
 
   defm SMAX_ZI : sve_int_arith_imm1<0b00, "smax", simm8>;
   defm SMIN_ZI : sve_int_arith_imm1<0b10, "smin", simm8>;

@@ -207,6 +207,12 @@ def SVEAddSubImm16Pat : ComplexPattern<i32, 2, "SelectSVEAddSubImm<MVT::i16>", []>;
 def SVEAddSubImm32Pat : ComplexPattern<i32, 2, "SelectSVEAddSubImm<MVT::i32>", []>;
 def SVEAddSubImm64Pat : ComplexPattern<i32, 2, "SelectSVEAddSubImm<MVT::i64>", []>;
 
+def SVELogicalImm8Pat : ComplexPattern<i64, 1, "SelectSVELogicalImm<MVT::i8>", []>;
+def SVELogicalImm16Pat : ComplexPattern<i64, 1, "SelectSVELogicalImm<MVT::i16>", []>;
+def SVELogicalImm32Pat : ComplexPattern<i64, 1, "SelectSVELogicalImm<MVT::i32>", []>;
+def SVELogicalImm64Pat : ComplexPattern<i64, 1, "SelectSVELogicalImm<MVT::i64>", []>;
+
+
 class SVEExactFPImm<string Suffix, string ValA, string ValB> : AsmOperandClass {
   let Name = "SVEExactFPImmOperand" # Suffix;
   let DiagnosticType = "Invalid" # Name;
@@ -298,6 +304,11 @@ class SVE_1_Op_Imm_OptLsl_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty
   : Pat<(vt (op (vt zprty:$Op1), (i32 (cpx i32:$imm, i32:$shift)))),
         (inst $Op1, i32:$imm, i32:$shift)>;
 
+class SVE_1_Op_Imm_Log_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
+                           ComplexPattern cpx, Instruction inst>
+  : Pat<(vt (op (vt zprty:$Op1), (i64 (cpx i64:$imm)))),
+        (inst $Op1, i64:$imm)>;
+
 class SVE_2_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
                    ValueType vt2, Instruction inst>
   : Pat<(vtd (op vt1:$Op1, vt2:$Op2)),
@@ -1129,9 +1140,14 @@ class sve_int_log_imm<bits<2> opc, string asm>
   let ElementSize = ElementSizeNone;
 }
 
-multiclass sve_int_log_imm<bits<2> opc, string asm, string alias> {
+multiclass sve_int_log_imm<bits<2> opc, string asm, string alias, SDPatternOperator op> {
   def NAME : sve_int_log_imm<opc, asm>;
 
+  def : SVE_1_Op_Imm_Log_Pat<nxv16i8, op, ZPR8, SVELogicalImm8Pat, !cast<Instruction>(NAME)>;
+  def : SVE_1_Op_Imm_Log_Pat<nxv8i16, op, ZPR16, SVELogicalImm16Pat, !cast<Instruction>(NAME)>;
+  def : SVE_1_Op_Imm_Log_Pat<nxv4i32, op, ZPR32, SVELogicalImm32Pat, !cast<Instruction>(NAME)>;
+  def : SVE_1_Op_Imm_Log_Pat<nxv2i64, op, ZPR64, SVELogicalImm64Pat, !cast<Instruction>(NAME)>;
+
   def : InstAlias<asm # "\t$Zdn, $Zdn, $imm",
                   (!cast<Instruction>(NAME) ZPR8:$Zdn, sve_logical_imm8:$imm), 4>;
   def : InstAlias<asm # "\t$Zdn, $Zdn, $imm",

@@ -0,0 +1,122 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

define <vscale x 16 x i8> @orr_i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: orr_i8:
; CHECK: orr z0.b, z0.b, #0xf
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.orr.imm.nxv16i8(<vscale x 16 x i8> %a,
                                                                   i64 15)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @orr_i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: orr_i16:
; CHECK: orr z0.h, z0.h, #0xfc07
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.orr.imm.nxv8i16(<vscale x 8 x i16> %a,
                                                                   i64 64519)
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @orr_i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: orr_i32:
; CHECK: orr z0.s, z0.s, #0xffff00
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.orr.imm.nxv4i32(<vscale x 4 x i32> %a,
                                                                   i64 16776960)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @orr_i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: orr_i64:
; CHECK: orr z0.d, z0.d, #0xfffc000000000000
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.orr.imm.nxv2i64(<vscale x 2 x i64> %a,
                                                                   i64 18445618173802708992)
  ret <vscale x 2 x i64> %res
}

define <vscale x 16 x i8> @eor_i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: eor_i8:
; CHECK: eor z0.b, z0.b, #0xf
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.imm.nxv16i8(<vscale x 16 x i8> %a,
                                                                   i64 15)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @eor_i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: eor_i16:
; CHECK: eor z0.h, z0.h, #0xfc07
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.imm.nxv8i16(<vscale x 8 x i16> %a,
                                                                   i64 64519)
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @eor_i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: eor_i32:
; CHECK: eor z0.s, z0.s, #0xffff00
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.imm.nxv4i32(<vscale x 4 x i32> %a,
                                                                   i64 16776960)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @eor_i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: eor_i64:
; CHECK: eor z0.d, z0.d, #0xfffc000000000000
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.imm.nxv2i64(<vscale x 2 x i64> %a,
                                                                   i64 18445618173802708992)
  ret <vscale x 2 x i64> %res
}

define <vscale x 16 x i8> @and_i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: and_i8:
; CHECK: and z0.b, z0.b, #0xf
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.and.imm.nxv16i8(<vscale x 16 x i8> %a,
                                                                   i64 15)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @and_i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: and_i16:
; CHECK: and z0.h, z0.h, #0xfc07
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.and.imm.nxv8i16(<vscale x 8 x i16> %a,
                                                                   i64 64519)
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @and_i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: and_i32:
; CHECK: and z0.s, z0.s, #0xffff00
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.and.imm.nxv4i32(<vscale x 4 x i32> %a,
                                                                   i64 16776960)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @and_i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: and_i64:
; CHECK: and z0.d, z0.d, #0xfffc000000000000
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.and.imm.nxv2i64(<vscale x 2 x i64> %a,
                                                                   i64 18445618173802708992)
  ret <vscale x 2 x i64> %res
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.orr.imm.nxv16i8(<vscale x 16 x i8>, i64)
declare <vscale x 8 x i16> @llvm.aarch64.sve.orr.imm.nxv8i16(<vscale x 8 x i16>, i64)
declare <vscale x 4 x i32> @llvm.aarch64.sve.orr.imm.nxv4i32(<vscale x 4 x i32>, i64)
declare <vscale x 2 x i64> @llvm.aarch64.sve.orr.imm.nxv2i64(<vscale x 2 x i64>, i64)
declare <vscale x 16 x i8> @llvm.aarch64.sve.eor.imm.nxv16i8(<vscale x 16 x i8>, i64)
declare <vscale x 8 x i16> @llvm.aarch64.sve.eor.imm.nxv8i16(<vscale x 8 x i16>, i64)
declare <vscale x 4 x i32> @llvm.aarch64.sve.eor.imm.nxv4i32(<vscale x 4 x i32>, i64)
declare <vscale x 2 x i64> @llvm.aarch64.sve.eor.imm.nxv2i64(<vscale x 2 x i64>, i64)
declare <vscale x 16 x i8> @llvm.aarch64.sve.and.imm.nxv16i8(<vscale x 16 x i8>, i64)
declare <vscale x 8 x i16> @llvm.aarch64.sve.and.imm.nxv8i16(<vscale x 8 x i16>, i64)
declare <vscale x 4 x i32> @llvm.aarch64.sve.and.imm.nxv4i32(<vscale x 4 x i32>, i64)
declare <vscale x 2 x i64> @llvm.aarch64.sve.and.imm.nxv2i64(<vscale x 2 x i64>, i64)