Revert "[AArch64][SVE] Replace integer immediate intrinsics with splat vector variant"
This reverts commit830e08b98b
andeb1857ce0d
. This commit leads to an unexpected failure on test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll. The review will need more changes before its re-commited.
parent ca520592c0
commit c7abf88411
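For context, the reverted commits had rewritten the immediate-form SVE intrinsics as plain IR splat idioms. A minimal sketch of the two equivalent forms, distilled from the test updates in the diff below (the function names are illustrative; both forms are expected to select to "add z0.s, z0.s, #30"):

define <vscale x 4 x i32> @add_imm_sketch(<vscale x 4 x i32> %a) {
  ; Immediate-intrinsic form restored by this revert: the constant is an
  ; explicit i32 operand (marked ImmArg in the intrinsic definition).
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.add.imm.nxv4i32(<vscale x 4 x i32> %a, i32 30)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @add_splat_sketch(<vscale x 4 x i32> %a) {
  ; Splat-vector form introduced by the reverted commits: the constant is
  ; broadcast with insertelement + shufflevector and used as a normal operand.
  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %res = add <vscale x 4 x i32> %a, %splat
  ret <vscale x 4 x i32> %res
}

declare <vscale x 4 x i32> @llvm.aarch64.sve.add.imm.nxv4i32(<vscale x 4 x i32>, i32)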
@@ -1069,6 +1069,12 @@ class AdvSIMD_GatherLoad_VectorBase_Intrinsic
                 ],
                 [IntrReadMem, IntrArgMemOnly]>;

+class AdvSIMD_1VectorArg_Imm_Intrinsic
+  : Intrinsic<[llvm_anyvector_ty],
+              [LLVMMatchType<0>,
+               llvm_i32_ty],
+              [IntrNoMem, ImmArg<1>]>;
+
 class AdvSIMD_ScatterStore_64bitOffset_Intrinsic
   : Intrinsic<[],
               [
@@ -1098,6 +1104,12 @@ class AdvSIMD_ScatterStore_VectorBase_Intrinsic
                 ],
                 [IntrWriteMem, IntrArgMemOnly, ImmArg<3>]>;

+class AdvSIMD_1VectorArg_Imm64_Intrinsic
+  : Intrinsic<[llvm_anyvector_ty],
+              [LLVMMatchType<0>,
+               llvm_i64_ty],
+              [IntrNoMem, ImmArg<1>]>;
+
 //
 // Loads
 //
@@ -1118,6 +1130,14 @@ def int_aarch64_sve_add : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_sub : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_subr : AdvSIMD_Pred2VectorArg_Intrinsic;

+def int_aarch64_sve_add_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
+def int_aarch64_sve_sub_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
+def int_aarch64_sve_subr_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
+def int_aarch64_sve_sqadd_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
+def int_aarch64_sve_uqadd_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
+def int_aarch64_sve_sqsub_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
+def int_aarch64_sve_uqsub_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
+
 def int_aarch64_sve_mul : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_smulh : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_umulh : AdvSIMD_Pred2VectorArg_Intrinsic;
@@ -1257,6 +1277,10 @@ def int_aarch64_sve_orns : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_nors : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_nands : AdvSIMD_Pred2VectorArg_Intrinsic;

+def int_aarch64_sve_orr_imm : AdvSIMD_1VectorArg_Imm64_Intrinsic;
+def int_aarch64_sve_eor_imm : AdvSIMD_1VectorArg_Imm64_Intrinsic;
+def int_aarch64_sve_and_imm : AdvSIMD_1VectorArg_Imm64_Intrinsic;
+
 //
 // Conversion
 //
@@ -93,13 +93,13 @@ let Predicates = [HasSVE] in {
   defm AND_ZPmZ : sve_int_bin_pred_log<0b010, "and", int_aarch64_sve_and>;
   defm BIC_ZPmZ : sve_int_bin_pred_log<0b011, "bic", int_aarch64_sve_bic>;

-  defm ADD_ZI : sve_int_arith_imm0<0b000, "add", add>;
-  defm SUB_ZI : sve_int_arith_imm0<0b001, "sub", sub>;
-  defm SUBR_ZI : sve_int_arith_imm0_subr<0b011, "subr", sub>;
-  defm SQADD_ZI : sve_int_arith_imm0<0b100, "sqadd", saddsat>;
-  defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd", uaddsat>;
-  defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub", ssubsat>;
-  defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub", usubsat>;
+  defm ADD_ZI : sve_int_arith_imm0<0b000, "add", int_aarch64_sve_add_imm>;
+  defm SUB_ZI : sve_int_arith_imm0<0b001, "sub", int_aarch64_sve_sub_imm>;
+  defm SUBR_ZI : sve_int_arith_imm0<0b011, "subr", int_aarch64_sve_subr_imm>;
+  defm SQADD_ZI : sve_int_arith_imm0<0b100, "sqadd", int_aarch64_sve_sqadd_imm>;
+  defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd", int_aarch64_sve_uqadd_imm>;
+  defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub", int_aarch64_sve_sqsub_imm>;
+  defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub", int_aarch64_sve_uqsub_imm>;

   defm MAD_ZPmZZ : sve_int_mladdsub_vvv_pred<0b0, "mad", int_aarch64_sve_mad>;
   defm MSB_ZPmZZ : sve_int_mladdsub_vvv_pred<0b1, "msb", int_aarch64_sve_msb>;
@@ -117,9 +117,9 @@ let Predicates = [HasSVE] in {
   defm EORV_VPZ : sve_int_reduce_2<0b001, "eorv", AArch64eorv_pred>;
   defm ANDV_VPZ : sve_int_reduce_2<0b010, "andv", AArch64andv_pred>;

-  defm ORR_ZI : sve_int_log_imm<0b00, "orr", "orn", or>;
-  defm EOR_ZI : sve_int_log_imm<0b01, "eor", "eon", xor>;
-  defm AND_ZI : sve_int_log_imm<0b10, "and", "bic", and>;
+  defm ORR_ZI : sve_int_log_imm<0b00, "orr", "orn", int_aarch64_sve_orr_imm>;
+  defm EOR_ZI : sve_int_log_imm<0b01, "eor", "eon", int_aarch64_sve_eor_imm>;
+  defm AND_ZI : sve_int_log_imm<0b10, "and", "bic", int_aarch64_sve_and_imm>;

   defm SMAX_ZI : sve_int_arith_imm1<0b00, "smax", simm8>;
   defm SMIN_ZI : sve_int_arith_imm1<0b10, "smin", simm8>;
@@ -299,19 +299,14 @@ class SVE_1_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
   : Pat<(vtd (op vt1:$Op1)),
         (inst $Op1)>;

-class SVE_1_Op_Imm_OptLsl_Reverse_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
-                                      ValueType it, ComplexPattern cpx, Instruction inst>
-  : Pat<(vt (op (vt (AArch64dup (it (cpx i32:$imm, i32:$shift)))), (vt zprty:$Op1))),
-        (inst $Op1, i32:$imm, i32:$shift)>;
-
 class SVE_1_Op_Imm_OptLsl_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
-                              ValueType it, ComplexPattern cpx, Instruction inst>
-  : Pat<(vt (op (vt zprty:$Op1), (vt (AArch64dup (it (cpx i32:$imm, i32:$shift)))))),
+                              ComplexPattern cpx, Instruction inst>
+  : Pat<(vt (op (vt zprty:$Op1), (i32 (cpx i32:$imm, i32:$shift)))),
        (inst $Op1, i32:$imm, i32:$shift)>;

 class SVE_1_Op_Imm_Log_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
-                           ValueType it, ComplexPattern cpx, Instruction inst>
-  : Pat<(vt (op (vt zprty:$Op1), (vt (AArch64dup (it (cpx i64:$imm)))))),
+                           ComplexPattern cpx, Instruction inst>
+  : Pat<(vt (op (vt zprty:$Op1), (i64 (cpx i64:$imm)))),
        (inst $Op1, i64:$imm)>;

 class SVE_2_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
@@ -1148,10 +1143,10 @@ class sve_int_log_imm<bits<2> opc, string asm>
 multiclass sve_int_log_imm<bits<2> opc, string asm, string alias, SDPatternOperator op> {
   def NAME : sve_int_log_imm<opc, asm>;

-  def : SVE_1_Op_Imm_Log_Pat<nxv16i8, op, ZPR8, i32, SVELogicalImm8Pat, !cast<Instruction>(NAME)>;
-  def : SVE_1_Op_Imm_Log_Pat<nxv8i16, op, ZPR16, i32, SVELogicalImm16Pat, !cast<Instruction>(NAME)>;
-  def : SVE_1_Op_Imm_Log_Pat<nxv4i32, op, ZPR32, i32, SVELogicalImm32Pat, !cast<Instruction>(NAME)>;
-  def : SVE_1_Op_Imm_Log_Pat<nxv2i64, op, ZPR64, i64, SVELogicalImm64Pat, !cast<Instruction>(NAME)>;
+  def : SVE_1_Op_Imm_Log_Pat<nxv16i8, op, ZPR8, SVELogicalImm8Pat, !cast<Instruction>(NAME)>;
+  def : SVE_1_Op_Imm_Log_Pat<nxv8i16, op, ZPR16, SVELogicalImm16Pat, !cast<Instruction>(NAME)>;
+  def : SVE_1_Op_Imm_Log_Pat<nxv4i32, op, ZPR32, SVELogicalImm32Pat, !cast<Instruction>(NAME)>;
+  def : SVE_1_Op_Imm_Log_Pat<nxv2i64, op, ZPR64, SVELogicalImm64Pat, !cast<Instruction>(NAME)>;

   def : InstAlias<asm # "\t$Zdn, $Zdn, $imm",
                   (!cast<Instruction>(NAME) ZPR8:$Zdn, sve_logical_imm8:$imm), 4>;
@@ -3325,22 +3320,10 @@ multiclass sve_int_arith_imm0<bits<3> opc, string asm, SDPatternOperator op> {
   def _S : sve_int_arith_imm0<0b10, opc, asm, ZPR32, addsub_imm8_opt_lsl_i32>;
   def _D : sve_int_arith_imm0<0b11, opc, asm, ZPR64, addsub_imm8_opt_lsl_i64>;

-  def : SVE_1_Op_Imm_OptLsl_Pat<nxv16i8, op, ZPR8, i32, SVEAddSubImm8Pat, !cast<Instruction>(NAME # _B)>;
-  def : SVE_1_Op_Imm_OptLsl_Pat<nxv8i16, op, ZPR16, i32, SVEAddSubImm16Pat, !cast<Instruction>(NAME # _H)>;
-  def : SVE_1_Op_Imm_OptLsl_Pat<nxv4i32, op, ZPR32, i32, SVEAddSubImm32Pat, !cast<Instruction>(NAME # _S)>;
-  def : SVE_1_Op_Imm_OptLsl_Pat<nxv2i64, op, ZPR64, i64, SVEAddSubImm64Pat, !cast<Instruction>(NAME # _D)>;
-}
-
-multiclass sve_int_arith_imm0_subr<bits<3> opc, string asm, SDPatternOperator op> {
-  def _B : sve_int_arith_imm0<0b00, opc, asm, ZPR8, addsub_imm8_opt_lsl_i8>;
-  def _H : sve_int_arith_imm0<0b01, opc, asm, ZPR16, addsub_imm8_opt_lsl_i16>;
-  def _S : sve_int_arith_imm0<0b10, opc, asm, ZPR32, addsub_imm8_opt_lsl_i32>;
-  def _D : sve_int_arith_imm0<0b11, opc, asm, ZPR64, addsub_imm8_opt_lsl_i64>;
-
-  def : SVE_1_Op_Imm_OptLsl_Reverse_Pat<nxv16i8, op, ZPR8, i32, SVEAddSubImm8Pat, !cast<Instruction>(NAME # _B)>;
-  def : SVE_1_Op_Imm_OptLsl_Reverse_Pat<nxv8i16, op, ZPR16, i32, SVEAddSubImm16Pat, !cast<Instruction>(NAME # _H)>;
-  def : SVE_1_Op_Imm_OptLsl_Reverse_Pat<nxv4i32, op, ZPR32, i32, SVEAddSubImm32Pat, !cast<Instruction>(NAME # _S)>;
-  def : SVE_1_Op_Imm_OptLsl_Reverse_Pat<nxv2i64, op, ZPR64, i64, SVEAddSubImm64Pat, !cast<Instruction>(NAME # _D)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv16i8, op, ZPR8, SVEAddSubImm8Pat, !cast<Instruction>(NAME # _B)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv8i16, op, ZPR16, SVEAddSubImm16Pat, !cast<Instruction>(NAME # _H)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv4i32, op, ZPR32, SVEAddSubImm32Pat, !cast<Instruction>(NAME # _S)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv2i64, op, ZPR64, SVEAddSubImm64Pat, !cast<Instruction>(NAME # _D)>;
 }

 class sve_int_arith_imm<bits<2> sz8_64, bits<6> opc, string asm,
@@ -8,9 +8,11 @@ define <vscale x 2 x i64> @no_dag_combine_zext_sext(<vscale x 2 x i1> %pg,
                                                     <vscale x 2 x i8>* %res_out,
                                                     <vscale x 2 x i1> %pred) {
 ; CHECK-LABEL: no_dag_combine_zext_sext
-; CHECK: ld1b { z0.d }, p0/z, [z0.d, #16]
-; CHECK-NEXT: st1b { z0.d }, p1, [x0]
-; CHECK-NEXT: and z0.d, z0.d, #0xff
+; CHECK: ld1b { z1.d }, p0/z, [z0.d, #16]
+; CHECK-NEXT: mov w8, #255
+; CHECK-NEXT: mov z0.d, x8
+; CHECK-NEXT: and z0.d, z1.d, z0.d
+; CHECK-NEXT: st1b { z1.d }, p1, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                  <vscale x 2 x i64> %base,
@@ -52,9 +54,11 @@ define <vscale x 2 x i64> @no_dag_combine_zext(<vscale x 2 x i1> %pg,
                                               <vscale x 2 x i8>* %res_out,
                                               <vscale x 2 x i1> %pred) {
 ; CHECK-LABEL: no_dag_combine_zext
-; CHECK: ld1b { z0.d }, p0/z, [z0.d, #16]
-; CHECK-NEXT: st1b { z0.d }, p1, [x0]
-; CHECK-NEXT: and z0.d, z0.d, #0xff
+; CHECK: ld1b { z1.d }, p0/z, [z0.d, #16]
+; CHECK-NEXT: mov w8, #255
+; CHECK-NEXT: mov z0.d, x8
+; CHECK-NEXT: and z0.d, z1.d, z0.d
+; CHECK-NEXT: st1b { z1.d }, p1, [x0]
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                  <vscale x 2 x i64> %base,
@@ -1,519 +1,471 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

-;
-; SVE Arith Vector Immediate Unpredicated CodeGen
-;
-
 ; ADD
-define <vscale x 16 x i8> @add_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: add_i8_low
+define <vscale x 16 x i8> @add_imm_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: add_imm_i8_low
 ; CHECK: add z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res = add <vscale x 16 x i8> %a, %splat
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.add.imm.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   i32 30)
   ret <vscale x 16 x i8> %res
 }

-define <vscale x 8 x i16> @add_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: add_i16_low
+define <vscale x 8 x i16> @add_imm_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: add_imm_i16_low
 ; CHECK: add z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = add <vscale x 8 x i16> %a, %splat
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.add.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   i32 30)
   ret <vscale x 8 x i16> %res
 }

-define <vscale x 8 x i16> @add_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: add_i16_high
+define <vscale x 8 x i16> @add_imm_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: add_imm_i16_high
 ; CHECK: add z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = add <vscale x 8 x i16> %a, %splat
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.add.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   i32 1024)
   ret <vscale x 8 x i16> %res
 }

-define <vscale x 4 x i32> @add_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: add_i32_low
+define <vscale x 4 x i32> @add_imm_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: add_imm_i32_low
 ; CHECK: add z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = add <vscale x 4 x i32> %a, %splat
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.add.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   i32 30)
   ret <vscale x 4 x i32> %res
 }

-define <vscale x 4 x i32> @add_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: add_i32_high
+define <vscale x 4 x i32> @add_imm_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: add_imm_i32_high
 ; CHECK: add z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = add <vscale x 4 x i32> %a, %splat
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.add.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   i32 1024)
   ret <vscale x 4 x i32> %res
 }

-define <vscale x 2 x i64> @add_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: add_i64_low
+define <vscale x 2 x i64> @add_imm_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: add_imm_i64_low
 ; CHECK: add z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = add <vscale x 2 x i64> %a, %splat
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.add.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   i32 30)
   ret <vscale x 2 x i64> %res
 }

-define <vscale x 2 x i64> @add_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: add_i64_high
+define <vscale x 2 x i64> @add_imm_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: add_imm_i64_high
 ; CHECK: add z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = add <vscale x 2 x i64> %a, %splat
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.add.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   i32 1024)
   ret <vscale x 2 x i64> %res
 }
-
-; SUBR
-define <vscale x 16 x i8> @subr_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: subr_i8_low
-; CHECK: subr z0.b, z0.b, #30
-; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res = sub <vscale x 16 x i8> %splat, %a
-  ret <vscale x 16 x i8> %res
-}
-
-define <vscale x 8 x i16> @subr_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: subr_i16_low
-; CHECK: subr z0.h, z0.h, #30
-; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = sub <vscale x 8 x i16> %splat, %a
-  ret <vscale x 8 x i16> %res
-}
-
-define <vscale x 8 x i16> @subr_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: subr_i16_high
-; CHECK: subr z0.h, z0.h, #1024
-; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = sub <vscale x 8 x i16> %splat, %a
-  ret <vscale x 8 x i16> %res
-}
-
-define <vscale x 4 x i32> @subr_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: subr_i32_low
-; CHECK: subr z0.s, z0.s, #30
-; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = sub <vscale x 4 x i32> %splat, %a
-  ret <vscale x 4 x i32> %res
-}
-
-define <vscale x 4 x i32> @subr_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: subr_i32_high
-; CHECK: subr z0.s, z0.s, #1024
-; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = sub <vscale x 4 x i32> %splat, %a
-  ret <vscale x 4 x i32> %res
-}
-
-define <vscale x 2 x i64> @subr_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: subr_i64_low
-; CHECK: subr z0.d, z0.d, #30
-; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = sub <vscale x 2 x i64> %splat, %a
-  ret <vscale x 2 x i64> %res
-}
-
-define <vscale x 2 x i64> @subr_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: subr_i64_high
-; CHECK: subr z0.d, z0.d, #1024
-; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = sub <vscale x 2 x i64> %splat, %a
-  ret <vscale x 2 x i64> %res
-}

 ; SUB
-define <vscale x 16 x i8> @sub_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: sub_i8_low
+define <vscale x 16 x i8> @sub_imm_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sub_imm_i8_low
 ; CHECK: sub z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res = sub <vscale x 16 x i8> %a, %splat
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.imm.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   i32 30)
   ret <vscale x 16 x i8> %res
 }

-define <vscale x 8 x i16> @sub_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sub_i16_low
+define <vscale x 8 x i16> @sub_imm_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sub_imm_i16_low
 ; CHECK: sub z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = sub <vscale x 8 x i16> %a, %splat
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   i32 30)
   ret <vscale x 8 x i16> %res
 }

-define <vscale x 8 x i16> @sub_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sub_i16_high
+define <vscale x 8 x i16> @sub_imm_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sub_imm_i16_high
 ; CHECK: sub z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = sub <vscale x 8 x i16> %a, %splat
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   i32 1024)
   ret <vscale x 8 x i16> %res
 }

-define <vscale x 4 x i32> @sub_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sub_i32_low
+define <vscale x 4 x i32> @sub_imm_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sub_imm_i32_low
 ; CHECK: sub z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = sub <vscale x 4 x i32> %a, %splat
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   i32 30)
   ret <vscale x 4 x i32> %res
 }

-define <vscale x 4 x i32> @sub_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sub_i32_high
+define <vscale x 4 x i32> @sub_imm_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sub_imm_i32_high
 ; CHECK: sub z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = sub <vscale x 4 x i32> %a, %splat
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   i32 1024)
   ret <vscale x 4 x i32> %res
 }

-define <vscale x 2 x i64> @sub_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sub_i64_low
+define <vscale x 2 x i64> @sub_imm_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sub_imm_i64_low
 ; CHECK: sub z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = sub <vscale x 2 x i64> %a, %splat
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   i32 30)
   ret <vscale x 2 x i64> %res
 }

-define <vscale x 2 x i64> @sub_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sub_i64_high
+define <vscale x 2 x i64> @sub_imm_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sub_imm_i64_high
 ; CHECK: sub z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = sub <vscale x 2 x i64> %a, %splat
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   i32 1024)
   ret <vscale x 2 x i64> %res
 }

 ; SQADD
-define <vscale x 16 x i8> @sqadd_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: sqadd_i8_low
+define <vscale x 16 x i8> @subr_imm_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: subr_imm_i8_low
+; CHECK: subr z0.b, z0.b, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.imm.nxv16i8(<vscale x 16 x i8> %a,
+                                                                    i32 30)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @subr_imm_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: subr_imm_i16_low
+; CHECK: subr z0.h, z0.h, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                    i32 30)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @subr_imm_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: subr_imm_i16_high
+; CHECK: subr z0.h, z0.h, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                    i32 1024)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @subr_imm_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: subr_imm_i32_low
+; CHECK: subr z0.s, z0.s, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                    i32 30)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @subr_imm_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: subr_imm_i32_high
+; CHECK: subr z0.s, z0.s, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                    i32 1024)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @subr_imm_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: subr_imm_i64_low
+; CHECK: subr z0.d, z0.d, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                    i32 30)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @subr_imm_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: subr_imm_i64_high
+; CHECK: subr z0.d, z0.d, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                    i32 1024)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 16 x i8> @sqadd_imm_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sqadd_imm_i8_low
 ; CHECK: sqadd z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res = call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.imm.nxv16i8(<vscale x 16 x i8> %a,
+                                                                     i32 30)
   ret <vscale x 16 x i8> %res
 }

-define <vscale x 8 x i16> @sqadd_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sqadd_i16_low
+define <vscale x 8 x i16> @sqadd_imm_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqadd_imm_i16_low
 ; CHECK: sqadd z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                     i32 30)
   ret <vscale x 8 x i16> %res
 }

-define <vscale x 8 x i16> @sqadd_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sqadd_i16_high
+define <vscale x 8 x i16> @sqadd_imm_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqadd_imm_i16_high
 ; CHECK: sqadd z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                     i32 1024)
   ret <vscale x 8 x i16> %res
 }

-define <vscale x 4 x i32> @sqadd_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sqadd_i32_low
+define <vscale x 4 x i32> @sqadd_imm_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_imm_i32_low
 ; CHECK: sqadd z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                     i32 30)
   ret <vscale x 4 x i32> %res
 }

-define <vscale x 4 x i32> @sqadd_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sqadd_i32_high
+define <vscale x 4 x i32> @sqadd_imm_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_imm_i32_high
 ; CHECK: sqadd z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                     i32 1024)
   ret <vscale x 4 x i32> %res
 }

-define <vscale x 2 x i64> @sqadd_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sqadd_i64_low
+define <vscale x 2 x i64> @sqadd_imm_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqadd_imm_i64_low
 ; CHECK: sqadd z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                     i32 30)
   ret <vscale x 2 x i64> %res
 }

-define <vscale x 2 x i64> @sqadd_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sqadd_i64_high
+define <vscale x 2 x i64> @sqadd_imm_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqadd_imm_i64_high
 ; CHECK: sqadd z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                     i32 1024)
   ret <vscale x 2 x i64> %res
 }

 ; UQADD
-define <vscale x 16 x i8> @uqadd_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: uqadd_i8_low
+define <vscale x 16 x i8> @uqadd_imm_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: uqadd_imm_i8_low
 ; CHECK: uqadd z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res = call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.imm.nxv16i8(<vscale x 16 x i8> %a,
+                                                                     i32 30)
   ret <vscale x 16 x i8> %res
 }

-define <vscale x 8 x i16> @uqadd_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: uqadd_i16_low
+define <vscale x 8 x i16> @uqadd_imm_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqadd_imm_i16_low
 ; CHECK: uqadd z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                     i32 30)
   ret <vscale x 8 x i16> %res
 }

-define <vscale x 8 x i16> @uqadd_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: uqadd_i16_high
+define <vscale x 8 x i16> @uqadd_imm_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqadd_imm_i16_high
 ; CHECK: uqadd z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                     i32 1024)
   ret <vscale x 8 x i16> %res
 }

-define <vscale x 4 x i32> @uqadd_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: uqadd_i32_low
+define <vscale x 4 x i32> @uqadd_imm_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqadd_imm_i32_low
 ; CHECK: uqadd z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                     i32 30)
   ret <vscale x 4 x i32> %res
 }

-define <vscale x 4 x i32> @uqadd_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: uqadd_i32_high
+define <vscale x 4 x i32> @uqadd_imm_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqadd_imm_i32_high
 ; CHECK: uqadd z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                     i32 1024)
   ret <vscale x 4 x i32> %res
 }

-define <vscale x 2 x i64> @uqadd_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: uqadd_i64_low
+define <vscale x 2 x i64> @uqadd_imm_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqadd_imm_i64_low
 ; CHECK: uqadd z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                     i32 30)
   ret <vscale x 2 x i64> %res
 }

-define <vscale x 2 x i64> @uqadd_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: uqadd_i64_high
+define <vscale x 2 x i64> @uqadd_imm_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqadd_imm_i64_high
 ; CHECK: uqadd z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                     i32 1024)
   ret <vscale x 2 x i64> %res
 }

 ; SQSUB
-define <vscale x 16 x i8> @sqsub_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: sqsub_i8_low
+define <vscale x 16 x i8> @sqsub_imm_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sqsub_imm_i8_low
 ; CHECK: sqsub z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res = call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.imm.nxv16i8(<vscale x 16 x i8> %a,
+                                                                     i32 30)
   ret <vscale x 16 x i8> %res
 }

-define <vscale x 8 x i16> @sqsub_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sqsub_i16_low
+define <vscale x 8 x i16> @sqsub_imm_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sqsub_imm_i16_low
 ; CHECK: sqsub z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                     i32 30)
   ret <vscale x 8 x i16> %res
 }

-define <vscale x 8 x i16> @sqsub_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sqsub_i16_high
+define <vscale x 8 x i16> @sqsub_imm_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqsub_imm_i16_high
 ; CHECK: sqsub z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                     i32 1024)
   ret <vscale x 8 x i16> %res
 }

-define <vscale x 4 x i32> @sqsub_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sqsub_i32_low
+define <vscale x 4 x i32> @sqsub_imm_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqsub_imm_i32_low
 ; CHECK: sqsub z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                     i32 30)
   ret <vscale x 4 x i32> %res
 }

-define <vscale x 4 x i32> @sqsub_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sqsub_i32_high
+define <vscale x 4 x i32> @sqsub_imm_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqsub_imm_i32_high
 ; CHECK: sqsub z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                     i32 1024)
   ret <vscale x 4 x i32> %res
 }

-define <vscale x 2 x i64> @sqsub_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sqsub_i64_low
+define <vscale x 2 x i64> @sqsub_imm_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_imm_i64_low
 ; CHECK: sqsub z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                     i32 30)
   ret <vscale x 2 x i64> %res
 }

-define <vscale x 2 x i64> @sqsub_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sqsub_i64_high
+define <vscale x 2 x i64> @sqsub_imm_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_imm_i64_high
 ; CHECK: sqsub z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                     i32 1024)
   ret <vscale x 2 x i64> %res
 }

 ; UQSUB
-define <vscale x 16 x i8> @uqsub_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: uqsub_i8_low
+define <vscale x 16 x i8> @uqsub_imm_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: uqsub_imm_i8_low
 ; CHECK: uqsub z0.b, z0.b, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res = call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.imm.nxv16i8(<vscale x 16 x i8> %a,
+                                                                     i32 30)
   ret <vscale x 16 x i8> %res
 }

-define <vscale x 8 x i16> @uqsub_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: uqsub_i16_low
+define <vscale x 8 x i16> @uqsub_imm_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqsub_imm_i16_low
 ; CHECK: uqsub z0.h, z0.h, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                     i32 30)
   ret <vscale x 8 x i16> %res
 }

-define <vscale x 8 x i16> @uqsub_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: uqsub_i16_high
+define <vscale x 8 x i16> @uqsub_imm_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqsub_imm_i16_high
 ; CHECK: uqsub z0.h, z0.h, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                     i32 1024)
   ret <vscale x 8 x i16> %res
 }

-define <vscale x 4 x i32> @uqsub_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: uqsub_i32_low
+define <vscale x 4 x i32> @uqsub_imm_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqsub_imm_i32_low
 ; CHECK: uqsub z0.s, z0.s, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                     i32 30)
   ret <vscale x 4 x i32> %res
 }

-define <vscale x 4 x i32> @uqsub_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: uqsub_i32_high
+define <vscale x 4 x i32> @uqsub_imm_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqsub_imm_i32_high
 ; CHECK: uqsub z0.s, z0.s, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                     i32 1024)
   ret <vscale x 4 x i32> %res
 }

-define <vscale x 2 x i64> @uqsub_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: uqsub_i64_low
+define <vscale x 2 x i64> @uqsub_imm_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqsub_imm_i64_low
 ; CHECK: uqsub z0.d, z0.d, #30
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                     i32 30)
   ret <vscale x 2 x i64> %res
 }

-define <vscale x 2 x i64> @uqsub_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: uqsub_i64_high
+define <vscale x 2 x i64> @uqsub_imm_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqsub_imm_i64_high
 ; CHECK: uqsub z0.d, z0.d, #1024
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                     i32 1024)
   ret <vscale x 2 x i64> %res
 }

-declare <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.add.imm.nxv16i8(<vscale x 16 x i8>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.add.imm.nxv8i16(<vscale x 8 x i16>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.add.imm.nxv4i32(<vscale x 4 x i32>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.add.imm.nxv2i64(<vscale x 2 x i64>, i32)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sub.imm.nxv16i8(<vscale x 16 x i8>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sub.imm.nxv8i16(<vscale x 8 x i16>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sub.imm.nxv4i32(<vscale x 4 x i32>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sub.imm.nxv2i64(<vscale x 2 x i64>, i32)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.subr.imm.nxv16i8(<vscale x 16 x i8>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.subr.imm.nxv8i16(<vscale x 8 x i16>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.subr.imm.nxv4i32(<vscale x 4 x i32>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.subr.imm.nxv2i64(<vscale x 2 x i64>, i32)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.imm.nxv16i8(<vscale x 16 x i8>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.imm.nxv8i16(<vscale x 8 x i16>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.imm.nxv4i32(<vscale x 4 x i32>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.imm.nxv2i64(<vscale x 2 x i64>, i32)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.imm.nxv16i8(<vscale x 16 x i8>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.imm.nxv8i16(<vscale x 8 x i16>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.imm.nxv4i32(<vscale x 4 x i32>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.imm.nxv2i64(<vscale x 2 x i64>, i32)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.imm.nxv16i8(<vscale x 16 x i8>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.imm.nxv8i16(<vscale x 8 x i16>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.imm.nxv4i32(<vscale x 4 x i32>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.imm.nxv2i64(<vscale x 2 x i64>, i32)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.imm.nxv16i8(<vscale x 16 x i8>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.imm.nxv8i16(<vscale x 8 x i16>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.imm.nxv4i32(<vscale x 4 x i32>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.imm.nxv2i64(<vscale x 2 x i64>, i32)
@@ -1,17 +1,11 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

-;
-; SVE Logical Vector Immediate Unpredicated CodeGen
-;
-
 ; ORR
 define <vscale x 16 x i8> @orr_i8(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: orr_i8:
 ; CHECK: orr z0.b, z0.b, #0xf
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 15, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res = or <vscale x 16 x i8> %a, %splat
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.orr.imm.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   i64 15)
   ret <vscale x 16 x i8> %res
 }

@@ -19,9 +13,8 @@ define <vscale x 8 x i16> @orr_i16(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: orr_i16:
 ; CHECK: orr z0.h, z0.h, #0xfc07
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 64519, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = or <vscale x 8 x i16> %a, %splat
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.orr.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   i64 64519)
   ret <vscale x 8 x i16> %res
 }

@@ -29,9 +22,8 @@ define <vscale x 4 x i32> @orr_i32(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: orr_i32:
 ; CHECK: orr z0.s, z0.s, #0xffff00
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 16776960, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = or <vscale x 4 x i32> %a, %splat
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.orr.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   i64 16776960)
   ret <vscale x 4 x i32> %res
 }

@@ -39,20 +31,17 @@ define <vscale x 2 x i64> @orr_i64(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: orr_i64:
 ; CHECK: orr z0.d, z0.d, #0xfffc000000000000
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 18445618173802708992, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = or <vscale x 2 x i64> %a, %splat
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.orr.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   i64 18445618173802708992)
   ret <vscale x 2 x i64> %res
 }

 ; EOR
 define <vscale x 16 x i8> @eor_i8(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: eor_i8:
 ; CHECK: eor z0.b, z0.b, #0xf
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 15, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res = xor <vscale x 16 x i8> %a, %splat
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.imm.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   i64 15)
   ret <vscale x 16 x i8> %res
 }

@@ -60,9 +49,8 @@ define <vscale x 8 x i16> @eor_i16(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: eor_i16:
 ; CHECK: eor z0.h, z0.h, #0xfc07
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 64519, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = xor <vscale x 8 x i16> %a, %splat
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   i64 64519)
   ret <vscale x 8 x i16> %res
 }

@@ -70,9 +58,8 @@ define <vscale x 4 x i32> @eor_i32(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: eor_i32:
 ; CHECK: eor z0.s, z0.s, #0xffff00
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 16776960, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = xor <vscale x 4 x i32> %a, %splat
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   i64 16776960)
   ret <vscale x 4 x i32> %res
 }

@@ -80,20 +67,17 @@ define <vscale x 2 x i64> @eor_i64(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: eor_i64:
 ; CHECK: eor z0.d, z0.d, #0xfffc000000000000
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 18445618173802708992, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = xor <vscale x 2 x i64> %a, %splat
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   i64 18445618173802708992)
   ret <vscale x 2 x i64> %res
 }

 ; AND
 define <vscale x 16 x i8> @and_i8(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: and_i8:
 ; CHECK: and z0.b, z0.b, #0xf
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 15, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res = and <vscale x 16 x i8> %a, %splat
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.and.imm.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   i64 15)
   ret <vscale x 16 x i8> %res
 }

@@ -101,9 +85,8 @@ define <vscale x 8 x i16> @and_i16(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: and_i16:
 ; CHECK: and z0.h, z0.h, #0xfc07
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 64519, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res = and <vscale x 8 x i16> %a, %splat
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.and.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   i64 64519)
   ret <vscale x 8 x i16> %res
 }

@@ -111,9 +94,8 @@ define <vscale x 4 x i32> @and_i32(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: and_i32:
 ; CHECK: and z0.s, z0.s, #0xffff00
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 16776960, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res = and <vscale x 4 x i32> %a, %splat
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.and.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   i64 16776960)
   ret <vscale x 4 x i32> %res
 }

@@ -121,8 +103,20 @@ define <vscale x 2 x i64> @and_i64(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: and_i64:
 ; CHECK: and z0.d, z0.d, #0xfffc000000000000
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 18445618173802708992, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res = and <vscale x 2 x i64> %a, %splat
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.and.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   i64 18445618173802708992)
   ret <vscale x 2 x i64> %res
 }
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.orr.imm.nxv16i8(<vscale x 16 x i8>, i64)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.orr.imm.nxv8i16(<vscale x 8 x i16>, i64)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.orr.imm.nxv4i32(<vscale x 4 x i32>, i64)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.orr.imm.nxv2i64(<vscale x 2 x i64>, i64)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.eor.imm.nxv16i8(<vscale x 16 x i8>, i64)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.eor.imm.nxv8i16(<vscale x 8 x i16>, i64)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.eor.imm.nxv4i32(<vscale x 4 x i32>, i64)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.eor.imm.nxv2i64(<vscale x 2 x i64>, i64)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.and.imm.nxv16i8(<vscale x 16 x i8>, i64)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.and.imm.nxv8i16(<vscale x 8 x i16>, i64)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.and.imm.nxv4i32(<vscale x 4 x i32>, i64)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.and.imm.nxv2i64(<vscale x 2 x i64>, i64)