//=- AArch64SVEInstrInfo.td - AArch64 SVE Instructions -*- tablegen -*-----=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// AArch64 Scalable Vector Extension (SVE) Instruction definitions.
//
//===----------------------------------------------------------------------===//

let Predicates = [HasSVE] in {

def RDFFR_PPz : sve_int_rdffr_pred<0b0, "rdffr">;
def RDFFRS_PPz : sve_int_rdffr_pred<0b1, "rdffrs">;
def RDFFR_P : sve_int_rdffr_unpred<"rdffr">;
def SETFFR : sve_int_setffr<"setffr">;
def WRFFR : sve_int_wrffr<"wrffr">;

defm ADD_ZZZ : sve_int_bin_cons_arit_0<0b000, "add">;
defm SUB_ZZZ : sve_int_bin_cons_arit_0<0b001, "sub">;
defm SQADD_ZZZ : sve_int_bin_cons_arit_0<0b100, "sqadd">;
defm UQADD_ZZZ : sve_int_bin_cons_arit_0<0b101, "uqadd">;
defm SQSUB_ZZZ : sve_int_bin_cons_arit_0<0b110, "sqsub">;
defm UQSUB_ZZZ : sve_int_bin_cons_arit_0<0b111, "uqsub">;

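// Unpredicated logical operations, e.g.
//   and z0.d, z1.d, z2.d
// MOV is accepted as an alias of ORR when both source registers are the same.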
def AND_ZZZ : sve_int_bin_cons_log<0b00, "and">;
def ORR_ZZZ : sve_int_bin_cons_log<0b01, "orr">;
def EOR_ZZZ : sve_int_bin_cons_log<0b10, "eor">;
def BIC_ZZZ : sve_int_bin_cons_log<0b11, "bic">;

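// Predicated (merging) arithmetic, e.g.
//   add z0.b, p0/m, z0.b, z1.b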
defm ADD_ZPmZ : sve_int_bin_pred_arit_0<0b000, "add">;
defm SUB_ZPmZ : sve_int_bin_pred_arit_0<0b001, "sub">;
defm SUBR_ZPmZ : sve_int_bin_pred_arit_0<0b011, "subr">;

defm ORR_ZPmZ : sve_int_bin_pred_log<0b000, "orr">;
defm EOR_ZPmZ : sve_int_bin_pred_log<0b001, "eor">;
defm AND_ZPmZ : sve_int_bin_pred_log<0b010, "and">;
defm BIC_ZPmZ : sve_int_bin_pred_log<0b011, "bic">;

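// Unpredicated add/subtract of an immediate, e.g.
//   add z0.b, z0.b, #255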
defm ADD_ZI : sve_int_arith_imm0<0b000, "add">;
defm SUB_ZI : sve_int_arith_imm0<0b001, "sub">;
defm SUBR_ZI : sve_int_arith_imm0<0b011, "subr">;
defm SQADD_ZI : sve_int_arith_imm0<0b100, "sqadd">;
defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd">;
defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub">;
defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub">;

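// Logical operations with a bitmask immediate, e.g.
//   and z0.d, z0.d, #0x6
// BIC, EON and ORN are aliases of the AND, EOR and ORR immediate forms.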
defm ORR_ZI : sve_int_log_imm<0b00, "orr", "orn">;
defm EOR_ZI : sve_int_log_imm<0b01, "eor", "eon">;
defm AND_ZI : sve_int_log_imm<0b10, "and", "bic">;

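// Min/max with an immediate, e.g.
//   smax z0.h, z0.h, #-128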
defm SMAX_ZI : sve_int_arith_imm1<0b00, "smax", simm8>;
defm SMIN_ZI : sve_int_arith_imm1<0b10, "smin", simm8>;
defm UMAX_ZI : sve_int_arith_imm1<0b01, "umax", imm0_255>;
defm UMIN_ZI : sve_int_arith_imm1<0b11, "umin", imm0_255>;

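// Predicated unary operations, e.g.
//   sxtb z0.h, p0/m, z0.h
//   abs z0.h, p0/m, z0.h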
defm SXTB_ZPmZ : sve_int_un_pred_arit_0_h<0b000, "sxtb">;
defm UXTB_ZPmZ : sve_int_un_pred_arit_0_h<0b001, "uxtb">;
defm SXTH_ZPmZ : sve_int_un_pred_arit_0_w<0b010, "sxth">;
defm UXTH_ZPmZ : sve_int_un_pred_arit_0_w<0b011, "uxth">;
defm SXTW_ZPmZ : sve_int_un_pred_arit_0_d<0b100, "sxtw">;
defm UXTW_ZPmZ : sve_int_un_pred_arit_0_d<0b101, "uxtw">;
defm ABS_ZPmZ : sve_int_un_pred_arit_0< 0b110, "abs">;
defm NEG_ZPmZ : sve_int_un_pred_arit_0< 0b111, "neg">;

defm CLS_ZPmZ : sve_int_un_pred_arit_1< 0b000, "cls">;
defm CLZ_ZPmZ : sve_int_un_pred_arit_1< 0b001, "clz">;
defm CNT_ZPmZ : sve_int_un_pred_arit_1< 0b010, "cnt">;
defm CNOT_ZPmZ : sve_int_un_pred_arit_1< 0b011, "cnot">;
defm NOT_ZPmZ : sve_int_un_pred_arit_1< 0b110, "not">;
defm FABS_ZPmZ : sve_int_un_pred_arit_1_fp<0b100, "fabs">;
defm FNEG_ZPmZ : sve_int_un_pred_arit_1_fp<0b101, "fneg">;

defm SMAX_ZPmZ : sve_int_bin_pred_arit_1<0b000, "smax">;
defm UMAX_ZPmZ : sve_int_bin_pred_arit_1<0b001, "umax">;
defm SMIN_ZPmZ : sve_int_bin_pred_arit_1<0b010, "smin">;
defm UMIN_ZPmZ : sve_int_bin_pred_arit_1<0b011, "umin">;
defm SABD_ZPmZ : sve_int_bin_pred_arit_1<0b100, "sabd">;
defm UABD_ZPmZ : sve_int_bin_pred_arit_1<0b101, "uabd">;

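// Predicated FP arithmetic with a restricted set of immediates, e.g.
//   fadd z0.h, p0/m, z0.h, #0.5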
defm FADD_ZPmI : sve_fp_2op_i_p_zds<0b000, "fadd", sve_fpimm_half_one>;
defm FMUL_ZPmI : sve_fp_2op_i_p_zds<0b010, "fmul", sve_fpimm_half_two>;
defm FMAX_ZPmI : sve_fp_2op_i_p_zds<0b110, "fmax", sve_fpimm_zero_one>;

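// Complex FP add/multiply-accumulate with rotate, e.g.
//   fcadd z0.h, p0/m, z0.h, z1.h, #90
//   fcmla z0.h, p0/m, z1.h, z2.h, #180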
defm FCADD_ZPmZ : sve_fp_fcadd<"fcadd">;
defm FCMLA_ZPmZZ : sve_fp_fcmla<"fcmla">;

defm FCMLA_ZZZI : sve_fp_fcmla_by_indexed_elem<"fcmla">;
defm FMUL_ZZZI : sve_fp_fmul_by_indexed_elem<"fmul">;

// Splat immediate (unpredicated)
defm DUP_ZI : sve_int_dup_imm<"dup">;
defm FDUP_ZI : sve_int_dup_fpimm<"fdup">;
defm DUPM_ZI : sve_int_dup_mask_imm<"dupm">;

// Splat immediate (predicated)
defm CPY_ZPmI : sve_int_dup_imm_pred_merge<"cpy">;
defm CPY_ZPzI : sve_int_dup_imm_pred_zero<"cpy">;
defm FCPY_ZPmI : sve_int_dup_fpimm_pred<"fcpy">;

// Splat scalar register (unpredicated, GPR or vector + element index)
defm DUP_ZR : sve_int_perm_dup_r<"dup">;
defm DUP_ZZI : sve_int_perm_dup_i<"dup">;

// Splat scalar register (predicated)
defm CPY_ZPmR : sve_int_perm_cpy_r<"cpy">;
defm CPY_ZPmV : sve_int_perm_cpy_v<"cpy">;

// Select elements from either vector (predicated)
defm SEL_ZPZZ : sve_int_sel_vvv<"sel">;

defm COMPACT_ZPZ : sve_int_perm_compact<"compact">;
defm INSR_ZR : sve_int_perm_insrs<"insr">;
defm INSR_ZV : sve_int_perm_insrv<"insr">;

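// Bitwise operations on predicate vectors (zeroing), e.g.
//   and p0.b, p1/z, p2.b, p3.b
// orr p0.b, p1/z, p1.b, p1.b is also accepted as the alias mov p0.b, p1.b.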
def AND_PPzPP : sve_int_pred_log<0b0000, "and">;
def BIC_PPzPP : sve_int_pred_log<0b0001, "bic">;
def EOR_PPzPP : sve_int_pred_log<0b0010, "eor">;
def SEL_PPPP : sve_int_pred_log<0b0011, "sel">;
def ANDS_PPzPP : sve_int_pred_log<0b0100, "ands">;
def BICS_PPzPP : sve_int_pred_log<0b0101, "bics">;
def EORS_PPzPP : sve_int_pred_log<0b0110, "eors">;
def ORR_PPzPP : sve_int_pred_log<0b1000, "orr">;
def ORN_PPzPP : sve_int_pred_log<0b1001, "orn">;
def NOR_PPzPP : sve_int_pred_log<0b1010, "nor">;
def NAND_PPzPP : sve_int_pred_log<0b1011, "nand">;
def ORRS_PPzPP : sve_int_pred_log<0b1100, "orrs">;
def ORNS_PPzPP : sve_int_pred_log<0b1101, "orns">;
def NORS_PPzPP : sve_int_pred_log<0b1110, "nors">;
def NANDS_PPzPP : sve_int_pred_log<0b1111, "nands">;

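// LASTB extracts the last active element, LASTA the element after it;
// CLASTA/CLASTB conditionally extract into a scalar, SIMD&FP scalar or vector, e.g.
//   lastb w0, p0, z0.b
//   clasta w0, p0, w0, z0.b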
defm CLASTA_RPZ : sve_int_perm_clast_rz<0, "clasta">;
defm CLASTB_RPZ : sve_int_perm_clast_rz<1, "clastb">;
defm CLASTA_VPZ : sve_int_perm_clast_vz<0, "clasta">;
defm CLASTB_VPZ : sve_int_perm_clast_vz<1, "clastb">;
defm CLASTA_ZPZ : sve_int_perm_clast_zz<0, "clasta">;
defm CLASTB_ZPZ : sve_int_perm_clast_zz<1, "clastb">;

defm LASTA_RPZ : sve_int_perm_last_r<0, "lasta">;
defm LASTB_RPZ : sve_int_perm_last_r<1, "lastb">;
defm LASTA_VPZ : sve_int_perm_last_v<0, "lasta">;
defm LASTB_VPZ : sve_int_perm_last_v<1, "lastb">;

// Contiguous loads (register + immediate)
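// e.g. ld1b z0.b, p0/z, [x0, #1, mul vl]   ('mul vl' scales the immediate by the vector length)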
defm LD1B_IMM : sve_mem_cld_si<0b0000, "ld1b", Z_b, ZPR8>;
defm LD1B_H_IMM : sve_mem_cld_si<0b0001, "ld1b", Z_h, ZPR16>;
defm LD1B_S_IMM : sve_mem_cld_si<0b0010, "ld1b", Z_s, ZPR32>;
defm LD1B_D_IMM : sve_mem_cld_si<0b0011, "ld1b", Z_d, ZPR64>;
defm LD1SW_D_IMM : sve_mem_cld_si<0b0100, "ld1sw", Z_d, ZPR64>;
defm LD1H_IMM : sve_mem_cld_si<0b0101, "ld1h", Z_h, ZPR16>;
defm LD1H_S_IMM : sve_mem_cld_si<0b0110, "ld1h", Z_s, ZPR32>;
defm LD1H_D_IMM : sve_mem_cld_si<0b0111, "ld1h", Z_d, ZPR64>;
defm LD1SH_D_IMM : sve_mem_cld_si<0b1000, "ld1sh", Z_d, ZPR64>;
defm LD1SH_S_IMM : sve_mem_cld_si<0b1001, "ld1sh", Z_s, ZPR32>;
defm LD1W_IMM : sve_mem_cld_si<0b1010, "ld1w", Z_s, ZPR32>;
defm LD1W_D_IMM : sve_mem_cld_si<0b1011, "ld1w", Z_d, ZPR64>;
defm LD1SB_D_IMM : sve_mem_cld_si<0b1100, "ld1sb", Z_d, ZPR64>;
defm LD1SB_S_IMM : sve_mem_cld_si<0b1101, "ld1sb", Z_s, ZPR32>;
defm LD1SB_H_IMM : sve_mem_cld_si<0b1110, "ld1sb", Z_h, ZPR16>;
defm LD1D_IMM : sve_mem_cld_si<0b1111, "ld1d", Z_d, ZPR64>;

// LD1R loads (splat scalar to vector)
defm LD1RB_IMM : sve_mem_ld_dup<0b00, 0b00, "ld1rb", Z_b, ZPR8, uimm6s1>;
defm LD1RB_H_IMM : sve_mem_ld_dup<0b00, 0b01, "ld1rb", Z_h, ZPR16, uimm6s1>;
defm LD1RB_S_IMM : sve_mem_ld_dup<0b00, 0b10, "ld1rb", Z_s, ZPR32, uimm6s1>;
defm LD1RB_D_IMM : sve_mem_ld_dup<0b00, 0b11, "ld1rb", Z_d, ZPR64, uimm6s1>;
defm LD1RSW_IMM : sve_mem_ld_dup<0b01, 0b00, "ld1rsw", Z_d, ZPR64, uimm6s4>;
defm LD1RH_IMM : sve_mem_ld_dup<0b01, 0b01, "ld1rh", Z_h, ZPR16, uimm6s2>;
defm LD1RH_S_IMM : sve_mem_ld_dup<0b01, 0b10, "ld1rh", Z_s, ZPR32, uimm6s2>;
defm LD1RH_D_IMM : sve_mem_ld_dup<0b01, 0b11, "ld1rh", Z_d, ZPR64, uimm6s2>;
defm LD1RSH_D_IMM : sve_mem_ld_dup<0b10, 0b00, "ld1rsh", Z_d, ZPR64, uimm6s2>;
defm LD1RSH_S_IMM : sve_mem_ld_dup<0b10, 0b01, "ld1rsh", Z_s, ZPR32, uimm6s2>;
defm LD1RW_IMM : sve_mem_ld_dup<0b10, 0b10, "ld1rw", Z_s, ZPR32, uimm6s4>;
defm LD1RW_D_IMM : sve_mem_ld_dup<0b10, 0b11, "ld1rw", Z_d, ZPR64, uimm6s4>;
defm LD1RSB_D_IMM : sve_mem_ld_dup<0b11, 0b00, "ld1rsb", Z_d, ZPR64, uimm6s1>;
defm LD1RSB_S_IMM : sve_mem_ld_dup<0b11, 0b01, "ld1rsb", Z_s, ZPR32, uimm6s1>;
defm LD1RSB_H_IMM : sve_mem_ld_dup<0b11, 0b10, "ld1rsb", Z_h, ZPR16, uimm6s1>;
defm LD1RD_IMM : sve_mem_ld_dup<0b11, 0b11, "ld1rd", Z_d, ZPR64, uimm6s8>;

// LD1RQ loads (load quadword-vector and splat to scalable vector)
defm LD1RQ_B_IMM : sve_mem_ldqr_si<0b00, "ld1rqb", Z_b, ZPR8>;
defm LD1RQ_H_IMM : sve_mem_ldqr_si<0b01, "ld1rqh", Z_h, ZPR16>;
defm LD1RQ_W_IMM : sve_mem_ldqr_si<0b10, "ld1rqw", Z_s, ZPR32>;
defm LD1RQ_D_IMM : sve_mem_ldqr_si<0b11, "ld1rqd", Z_d, ZPR64>;
defm LD1RQ_B : sve_mem_ldqr_ss<0b00, "ld1rqb", Z_b, ZPR8, GPR64NoXZRshifted8>;
defm LD1RQ_H : sve_mem_ldqr_ss<0b01, "ld1rqh", Z_h, ZPR16, GPR64NoXZRshifted16>;
defm LD1RQ_W : sve_mem_ldqr_ss<0b10, "ld1rqw", Z_s, ZPR32, GPR64NoXZRshifted32>;
defm LD1RQ_D : sve_mem_ldqr_ss<0b11, "ld1rqd", Z_d, ZPR64, GPR64NoXZRshifted64>;

// Contiguous loads (register + register)
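// e.g. ld1h z0.h, p0/z, [x0, x1, lsl #1]   (index register scaled by the element size)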
defm LD1B : sve_mem_cld_ss<0b0000, "ld1b", Z_b, ZPR8, GPR64NoXZRshifted8>;
defm LD1B_H : sve_mem_cld_ss<0b0001, "ld1b", Z_h, ZPR16, GPR64NoXZRshifted8>;
defm LD1B_S : sve_mem_cld_ss<0b0010, "ld1b", Z_s, ZPR32, GPR64NoXZRshifted8>;
defm LD1B_D : sve_mem_cld_ss<0b0011, "ld1b", Z_d, ZPR64, GPR64NoXZRshifted8>;
defm LD1SW_D : sve_mem_cld_ss<0b0100, "ld1sw", Z_d, ZPR64, GPR64NoXZRshifted32>;
defm LD1H : sve_mem_cld_ss<0b0101, "ld1h", Z_h, ZPR16, GPR64NoXZRshifted16>;
defm LD1H_S : sve_mem_cld_ss<0b0110, "ld1h", Z_s, ZPR32, GPR64NoXZRshifted16>;
defm LD1H_D : sve_mem_cld_ss<0b0111, "ld1h", Z_d, ZPR64, GPR64NoXZRshifted16>;
defm LD1SH_D : sve_mem_cld_ss<0b1000, "ld1sh", Z_d, ZPR64, GPR64NoXZRshifted16>;
defm LD1SH_S : sve_mem_cld_ss<0b1001, "ld1sh", Z_s, ZPR32, GPR64NoXZRshifted16>;
defm LD1W : sve_mem_cld_ss<0b1010, "ld1w", Z_s, ZPR32, GPR64NoXZRshifted32>;
defm LD1W_D : sve_mem_cld_ss<0b1011, "ld1w", Z_d, ZPR64, GPR64NoXZRshifted32>;
defm LD1SB_D : sve_mem_cld_ss<0b1100, "ld1sb", Z_d, ZPR64, GPR64NoXZRshifted8>;
defm LD1SB_S : sve_mem_cld_ss<0b1101, "ld1sb", Z_s, ZPR32, GPR64NoXZRshifted8>;
defm LD1SB_H : sve_mem_cld_ss<0b1110, "ld1sb", Z_h, ZPR16, GPR64NoXZRshifted8>;
defm LD1D : sve_mem_cld_ss<0b1111, "ld1d", Z_d, ZPR64, GPR64NoXZRshifted64>;

// Non-faulting contiguous loads (register + immediate)
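// e.g. ldnf1b z0.b, p0/z, [x0, #1, mul vl]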
defm LDNF1B_IMM : sve_mem_cldnf_si<0b0000, "ldnf1b", Z_b, ZPR8>;
defm LDNF1B_H_IMM : sve_mem_cldnf_si<0b0001, "ldnf1b", Z_h, ZPR16>;
defm LDNF1B_S_IMM : sve_mem_cldnf_si<0b0010, "ldnf1b", Z_s, ZPR32>;
defm LDNF1B_D_IMM : sve_mem_cldnf_si<0b0011, "ldnf1b", Z_d, ZPR64>;
defm LDNF1SW_D_IMM : sve_mem_cldnf_si<0b0100, "ldnf1sw", Z_d, ZPR64>;
defm LDNF1H_IMM : sve_mem_cldnf_si<0b0101, "ldnf1h", Z_h, ZPR16>;
defm LDNF1H_S_IMM : sve_mem_cldnf_si<0b0110, "ldnf1h", Z_s, ZPR32>;
defm LDNF1H_D_IMM : sve_mem_cldnf_si<0b0111, "ldnf1h", Z_d, ZPR64>;
defm LDNF1SH_D_IMM : sve_mem_cldnf_si<0b1000, "ldnf1sh", Z_d, ZPR64>;
defm LDNF1SH_S_IMM : sve_mem_cldnf_si<0b1001, "ldnf1sh", Z_s, ZPR32>;
defm LDNF1W_IMM : sve_mem_cldnf_si<0b1010, "ldnf1w", Z_s, ZPR32>;
defm LDNF1W_D_IMM : sve_mem_cldnf_si<0b1011, "ldnf1w", Z_d, ZPR64>;
defm LDNF1SB_D_IMM : sve_mem_cldnf_si<0b1100, "ldnf1sb", Z_d, ZPR64>;
defm LDNF1SB_S_IMM : sve_mem_cldnf_si<0b1101, "ldnf1sb", Z_s, ZPR32>;
defm LDNF1SB_H_IMM : sve_mem_cldnf_si<0b1110, "ldnf1sb", Z_h, ZPR16>;
defm LDNF1D_IMM : sve_mem_cldnf_si<0b1111, "ldnf1d", Z_d, ZPR64>;

// First-faulting contiguous loads (register + register)
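// e.g. ldff1b z0.b, p0/z, [x0, x1]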
defm LDFF1B : sve_mem_cldff_ss<0b0000, "ldff1b", Z_b, ZPR8, GPR64shifted8>;
defm LDFF1B_H : sve_mem_cldff_ss<0b0001, "ldff1b", Z_h, ZPR16, GPR64shifted8>;
defm LDFF1B_S : sve_mem_cldff_ss<0b0010, "ldff1b", Z_s, ZPR32, GPR64shifted8>;
defm LDFF1B_D : sve_mem_cldff_ss<0b0011, "ldff1b", Z_d, ZPR64, GPR64shifted8>;
defm LDFF1SW_D : sve_mem_cldff_ss<0b0100, "ldff1sw", Z_d, ZPR64, GPR64shifted32>;
defm LDFF1H : sve_mem_cldff_ss<0b0101, "ldff1h", Z_h, ZPR16, GPR64shifted16>;
defm LDFF1H_S : sve_mem_cldff_ss<0b0110, "ldff1h", Z_s, ZPR32, GPR64shifted16>;
defm LDFF1H_D : sve_mem_cldff_ss<0b0111, "ldff1h", Z_d, ZPR64, GPR64shifted16>;
defm LDFF1SH_D : sve_mem_cldff_ss<0b1000, "ldff1sh", Z_d, ZPR64, GPR64shifted16>;
defm LDFF1SH_S : sve_mem_cldff_ss<0b1001, "ldff1sh", Z_s, ZPR32, GPR64shifted16>;
defm LDFF1W : sve_mem_cldff_ss<0b1010, "ldff1w", Z_s, ZPR32, GPR64shifted32>;
defm LDFF1W_D : sve_mem_cldff_ss<0b1011, "ldff1w", Z_d, ZPR64, GPR64shifted32>;
defm LDFF1SB_D : sve_mem_cldff_ss<0b1100, "ldff1sb", Z_d, ZPR64, GPR64shifted8>;
defm LDFF1SB_S : sve_mem_cldff_ss<0b1101, "ldff1sb", Z_s, ZPR32, GPR64shifted8>;
defm LDFF1SB_H : sve_mem_cldff_ss<0b1110, "ldff1sb", Z_h, ZPR16, GPR64shifted8>;
defm LDFF1D : sve_mem_cldff_ss<0b1111, "ldff1d", Z_d, ZPR64, GPR64shifted64>;

// LD(2|3|4) structured loads (register + immediate)
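// e.g. ld2b { z0.b, z1.b }, p0/z, [x0, #2, mul vl]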
defm LD2B_IMM : sve_mem_eld_si<0b00, 0b01, ZZ_b, "ld2b", simm4s2>;
defm LD3B_IMM : sve_mem_eld_si<0b00, 0b10, ZZZ_b, "ld3b", simm4s3>;
defm LD4B_IMM : sve_mem_eld_si<0b00, 0b11, ZZZZ_b, "ld4b", simm4s4>;
defm LD2H_IMM : sve_mem_eld_si<0b01, 0b01, ZZ_h, "ld2h", simm4s2>;
defm LD3H_IMM : sve_mem_eld_si<0b01, 0b10, ZZZ_h, "ld3h", simm4s3>;
defm LD4H_IMM : sve_mem_eld_si<0b01, 0b11, ZZZZ_h, "ld4h", simm4s4>;
defm LD2W_IMM : sve_mem_eld_si<0b10, 0b01, ZZ_s, "ld2w", simm4s2>;
defm LD3W_IMM : sve_mem_eld_si<0b10, 0b10, ZZZ_s, "ld3w", simm4s3>;
defm LD4W_IMM : sve_mem_eld_si<0b10, 0b11, ZZZZ_s, "ld4w", simm4s4>;
defm LD2D_IMM : sve_mem_eld_si<0b11, 0b01, ZZ_d, "ld2d", simm4s2>;
defm LD3D_IMM : sve_mem_eld_si<0b11, 0b10, ZZZ_d, "ld3d", simm4s3>;
defm LD4D_IMM : sve_mem_eld_si<0b11, 0b11, ZZZZ_d, "ld4d", simm4s4>;

// LD(2|3|4) structured loads (register + register)
def LD2B : sve_mem_eld_ss<0b00, 0b01, ZZ_b, "ld2b", GPR64NoXZRshifted8>;
def LD3B : sve_mem_eld_ss<0b00, 0b10, ZZZ_b, "ld3b", GPR64NoXZRshifted8>;
def LD4B : sve_mem_eld_ss<0b00, 0b11, ZZZZ_b, "ld4b", GPR64NoXZRshifted8>;
def LD2H : sve_mem_eld_ss<0b01, 0b01, ZZ_h, "ld2h", GPR64NoXZRshifted16>;
def LD3H : sve_mem_eld_ss<0b01, 0b10, ZZZ_h, "ld3h", GPR64NoXZRshifted16>;
def LD4H : sve_mem_eld_ss<0b01, 0b11, ZZZZ_h, "ld4h", GPR64NoXZRshifted16>;
def LD2W : sve_mem_eld_ss<0b10, 0b01, ZZ_s, "ld2w", GPR64NoXZRshifted32>;
def LD3W : sve_mem_eld_ss<0b10, 0b10, ZZZ_s, "ld3w", GPR64NoXZRshifted32>;
def LD4W : sve_mem_eld_ss<0b10, 0b11, ZZZZ_s, "ld4w", GPR64NoXZRshifted32>;
def LD2D : sve_mem_eld_ss<0b11, 0b01, ZZ_d, "ld2d", GPR64NoXZRshifted64>;
def LD3D : sve_mem_eld_ss<0b11, 0b10, ZZZ_d, "ld3d", GPR64NoXZRshifted64>;
def LD4D : sve_mem_eld_ss<0b11, 0b11, ZZZZ_d, "ld4d", GPR64NoXZRshifted64>;

// Gathers using unscaled 32-bit offsets, e.g.
// ld1h z0.s, p0/z, [x0, z0.s, uxtw]
defm GLD1SB_S : sve_mem_32b_gld_vs_32_unscaled<0b0000, "ld1sb", ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only>;
defm GLDFF1SB_S : sve_mem_32b_gld_vs_32_unscaled<0b0001, "ldff1sb", ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only>;
defm GLD1B_S : sve_mem_32b_gld_vs_32_unscaled<0b0010, "ld1b", ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only>;
defm GLDFF1B_S : sve_mem_32b_gld_vs_32_unscaled<0b0011, "ldff1b", ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only>;
defm GLD1SH_S : sve_mem_32b_gld_vs_32_unscaled<0b0100, "ld1sh", ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
defm GLDFF1SH_S : sve_mem_32b_gld_vs_32_unscaled<0b0101, "ldff1sh", ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
defm GLD1H_S : sve_mem_32b_gld_vs_32_unscaled<0b0110, "ld1h", ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
defm GLDFF1H_S : sve_mem_32b_gld_vs_32_unscaled<0b0111, "ldff1h", ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
defm GLD1W : sve_mem_32b_gld_vs_32_unscaled<0b1010, "ld1w", ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
defm GLDFF1W : sve_mem_32b_gld_vs_32_unscaled<0b1011, "ldff1w", ZPR32ExtSXTW8, ZPR32ExtUXTW8>;

// Gathers using scaled 32-bit offsets, e.g.
// ld1h z0.s, p0/z, [x0, z0.s, uxtw #1]
defm GLD1SH_S : sve_mem_32b_gld_sv_32_scaled<0b0100, "ld1sh", ZPR32ExtSXTW16, ZPR32ExtUXTW16>;
defm GLDFF1SH_S : sve_mem_32b_gld_sv_32_scaled<0b0101, "ldff1sh", ZPR32ExtSXTW16, ZPR32ExtUXTW16>;
defm GLD1H_S : sve_mem_32b_gld_sv_32_scaled<0b0110, "ld1h", ZPR32ExtSXTW16, ZPR32ExtUXTW16>;
defm GLDFF1H_S : sve_mem_32b_gld_sv_32_scaled<0b0111, "ldff1h", ZPR32ExtSXTW16, ZPR32ExtUXTW16>;
defm GLD1W : sve_mem_32b_gld_sv_32_scaled<0b1010, "ld1w", ZPR32ExtSXTW32, ZPR32ExtUXTW32>;
defm GLDFF1W : sve_mem_32b_gld_sv_32_scaled<0b1011, "ldff1w", ZPR32ExtSXTW32, ZPR32ExtUXTW32>;

// Gathers using scaled 32-bit pointers with offset, e.g.
// ld1h z0.s, p0/z, [z0.s, #16]
defm GLD1SB_S : sve_mem_32b_gld_vi_32_ptrs<0b0000, "ld1sb", imm0_31>;
defm GLDFF1SB_S : sve_mem_32b_gld_vi_32_ptrs<0b0001, "ldff1sb", imm0_31>;
defm GLD1B_S : sve_mem_32b_gld_vi_32_ptrs<0b0010, "ld1b", imm0_31>;
defm GLDFF1B_S : sve_mem_32b_gld_vi_32_ptrs<0b0011, "ldff1b", imm0_31>;
defm GLD1SH_S : sve_mem_32b_gld_vi_32_ptrs<0b0100, "ld1sh", uimm5s2>;
defm GLDFF1SH_S : sve_mem_32b_gld_vi_32_ptrs<0b0101, "ldff1sh", uimm5s2>;
defm GLD1H_S : sve_mem_32b_gld_vi_32_ptrs<0b0110, "ld1h", uimm5s2>;
defm GLDFF1H_S : sve_mem_32b_gld_vi_32_ptrs<0b0111, "ldff1h", uimm5s2>;
defm GLD1W : sve_mem_32b_gld_vi_32_ptrs<0b1010, "ld1w", uimm5s4>;
defm GLDFF1W : sve_mem_32b_gld_vi_32_ptrs<0b1011, "ldff1w", uimm5s4>;

// Gathers using scaled 64-bit pointers with offset, e.g.
// ld1h z0.d, p0/z, [z0.d, #16]
defm GLD1SB_D : sve_mem_64b_gld_vi_64_ptrs<0b0000, "ld1sb", imm0_31>;
defm GLDFF1SB_D : sve_mem_64b_gld_vi_64_ptrs<0b0001, "ldff1sb", imm0_31>;
defm GLD1B_D : sve_mem_64b_gld_vi_64_ptrs<0b0010, "ld1b", imm0_31>;
defm GLDFF1B_D : sve_mem_64b_gld_vi_64_ptrs<0b0011, "ldff1b", imm0_31>;
defm GLD1SH_D : sve_mem_64b_gld_vi_64_ptrs<0b0100, "ld1sh", uimm5s2>;
defm GLDFF1SH_D : sve_mem_64b_gld_vi_64_ptrs<0b0101, "ldff1sh", uimm5s2>;
defm GLD1H_D : sve_mem_64b_gld_vi_64_ptrs<0b0110, "ld1h", uimm5s2>;
defm GLDFF1H_D : sve_mem_64b_gld_vi_64_ptrs<0b0111, "ldff1h", uimm5s2>;
defm GLD1SW_D : sve_mem_64b_gld_vi_64_ptrs<0b1000, "ld1sw", uimm5s4>;
defm GLDFF1SW_D : sve_mem_64b_gld_vi_64_ptrs<0b1001, "ldff1sw", uimm5s4>;
defm GLD1W_D : sve_mem_64b_gld_vi_64_ptrs<0b1010, "ld1w", uimm5s4>;
defm GLDFF1W_D : sve_mem_64b_gld_vi_64_ptrs<0b1011, "ldff1w", uimm5s4>;
defm GLD1D : sve_mem_64b_gld_vi_64_ptrs<0b1110, "ld1d", uimm5s8>;
defm GLDFF1D : sve_mem_64b_gld_vi_64_ptrs<0b1111, "ldff1d", uimm5s8>;

// Gathers using unscaled 64-bit offsets, e.g.
// ld1h z0.d, p0/z, [x0, z0.d]
defm GLD1SB_D : sve_mem_64b_gld_vs2_64_unscaled<0b0000, "ld1sb">;
defm GLDFF1SB_D : sve_mem_64b_gld_vs2_64_unscaled<0b0001, "ldff1sb">;
defm GLD1B_D : sve_mem_64b_gld_vs2_64_unscaled<0b0010, "ld1b">;
defm GLDFF1B_D : sve_mem_64b_gld_vs2_64_unscaled<0b0011, "ldff1b">;
defm GLD1SH_D : sve_mem_64b_gld_vs2_64_unscaled<0b0100, "ld1sh">;
defm GLDFF1SH_D : sve_mem_64b_gld_vs2_64_unscaled<0b0101, "ldff1sh">;
defm GLD1H_D : sve_mem_64b_gld_vs2_64_unscaled<0b0110, "ld1h">;
defm GLDFF1H_D : sve_mem_64b_gld_vs2_64_unscaled<0b0111, "ldff1h">;
defm GLD1SW_D : sve_mem_64b_gld_vs2_64_unscaled<0b1000, "ld1sw">;
defm GLDFF1SW_D : sve_mem_64b_gld_vs2_64_unscaled<0b1001, "ldff1sw">;
defm GLD1W_D : sve_mem_64b_gld_vs2_64_unscaled<0b1010, "ld1w">;
defm GLDFF1W_D : sve_mem_64b_gld_vs2_64_unscaled<0b1011, "ldff1w">;
defm GLD1D : sve_mem_64b_gld_vs2_64_unscaled<0b1110, "ld1d">;
defm GLDFF1D : sve_mem_64b_gld_vs2_64_unscaled<0b1111, "ldff1d">;

// Gathers using scaled 64-bit offsets, e.g.
// ld1h z0.d, p0/z, [x0, z0.d, lsl #1]
defm GLD1SH_D : sve_mem_64b_gld_sv2_64_scaled<0b0100, "ld1sh", ZPR64ExtLSL16>;
defm GLDFF1SH_D : sve_mem_64b_gld_sv2_64_scaled<0b0101, "ldff1sh", ZPR64ExtLSL16>;
defm GLD1H_D : sve_mem_64b_gld_sv2_64_scaled<0b0110, "ld1h", ZPR64ExtLSL16>;
defm GLDFF1H_D : sve_mem_64b_gld_sv2_64_scaled<0b0111, "ldff1h", ZPR64ExtLSL16>;
defm GLD1SW_D : sve_mem_64b_gld_sv2_64_scaled<0b1000, "ld1sw", ZPR64ExtLSL32>;
defm GLDFF1SW_D : sve_mem_64b_gld_sv2_64_scaled<0b1001, "ldff1sw", ZPR64ExtLSL32>;
defm GLD1W_D : sve_mem_64b_gld_sv2_64_scaled<0b1010, "ld1w", ZPR64ExtLSL32>;
defm GLDFF1W_D : sve_mem_64b_gld_sv2_64_scaled<0b1011, "ldff1w", ZPR64ExtLSL32>;
defm GLD1D : sve_mem_64b_gld_sv2_64_scaled<0b1110, "ld1d", ZPR64ExtLSL64>;
defm GLDFF1D : sve_mem_64b_gld_sv2_64_scaled<0b1111, "ldff1d", ZPR64ExtLSL64>;

// Gathers using unscaled 32-bit offsets unpacked in 64-bit elements, e.g.
// ld1h z0.d, p0/z, [x0, z0.d, uxtw]
defm GLD1SB_D : sve_mem_64b_gld_vs_32_unscaled<0b0000, "ld1sb", ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only>;
defm GLDFF1SB_D : sve_mem_64b_gld_vs_32_unscaled<0b0001, "ldff1sb", ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only>;
defm GLD1B_D : sve_mem_64b_gld_vs_32_unscaled<0b0010, "ld1b", ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only>;
defm GLDFF1B_D : sve_mem_64b_gld_vs_32_unscaled<0b0011, "ldff1b", ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only>;
defm GLD1SH_D : sve_mem_64b_gld_vs_32_unscaled<0b0100, "ld1sh", ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
defm GLDFF1SH_D : sve_mem_64b_gld_vs_32_unscaled<0b0101, "ldff1sh", ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
defm GLD1H_D : sve_mem_64b_gld_vs_32_unscaled<0b0110, "ld1h", ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
defm GLDFF1H_D : sve_mem_64b_gld_vs_32_unscaled<0b0111, "ldff1h", ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
defm GLD1SW_D : sve_mem_64b_gld_vs_32_unscaled<0b1000, "ld1sw", ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
defm GLDFF1SW_D : sve_mem_64b_gld_vs_32_unscaled<0b1001, "ldff1sw", ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
defm GLD1W_D : sve_mem_64b_gld_vs_32_unscaled<0b1010, "ld1w", ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
defm GLDFF1W_D : sve_mem_64b_gld_vs_32_unscaled<0b1011, "ldff1w", ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
defm GLD1D : sve_mem_64b_gld_vs_32_unscaled<0b1110, "ld1d", ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
defm GLDFF1D : sve_mem_64b_gld_vs_32_unscaled<0b1111, "ldff1d", ZPR64ExtSXTW8, ZPR64ExtUXTW8>;

// Gathers using scaled 32-bit offsets unpacked in 64-bit elements, e.g.
// ld1h z0.d, p0/z, [x0, z0.d, uxtw #1]
defm GLD1SH_D : sve_mem_64b_gld_sv_32_scaled<0b0100, "ld1sh", ZPR64ExtSXTW16, ZPR64ExtUXTW16>;
defm GLDFF1SH_D : sve_mem_64b_gld_sv_32_scaled<0b0101, "ldff1sh", ZPR64ExtSXTW16, ZPR64ExtUXTW16>;
defm GLD1H_D : sve_mem_64b_gld_sv_32_scaled<0b0110, "ld1h", ZPR64ExtSXTW16, ZPR64ExtUXTW16>;
defm GLDFF1H_D : sve_mem_64b_gld_sv_32_scaled<0b0111, "ldff1h", ZPR64ExtSXTW16, ZPR64ExtUXTW16>;
defm GLD1SW_D : sve_mem_64b_gld_sv_32_scaled<0b1000, "ld1sw", ZPR64ExtSXTW32, ZPR64ExtUXTW32>;
defm GLDFF1SW_D : sve_mem_64b_gld_sv_32_scaled<0b1001, "ldff1sw", ZPR64ExtSXTW32, ZPR64ExtUXTW32>;
defm GLD1W_D : sve_mem_64b_gld_sv_32_scaled<0b1010, "ld1w", ZPR64ExtSXTW32, ZPR64ExtUXTW32>;
defm GLDFF1W_D : sve_mem_64b_gld_sv_32_scaled<0b1011, "ldff1w", ZPR64ExtSXTW32, ZPR64ExtUXTW32>;
defm GLD1D : sve_mem_64b_gld_sv_32_scaled<0b1110, "ld1d", ZPR64ExtSXTW64, ZPR64ExtUXTW64>;
defm GLDFF1D : sve_mem_64b_gld_sv_32_scaled<0b1111, "ldff1d", ZPR64ExtSXTW64, ZPR64ExtUXTW64>;

// Non-temporal contiguous loads (register + immediate)
defm LDNT1B_ZRI : sve_mem_cldnt_si<0b00, "ldnt1b", Z_b, ZPR8>;
defm LDNT1H_ZRI : sve_mem_cldnt_si<0b01, "ldnt1h", Z_h, ZPR16>;
defm LDNT1W_ZRI : sve_mem_cldnt_si<0b10, "ldnt1w", Z_s, ZPR32>;
defm LDNT1D_ZRI : sve_mem_cldnt_si<0b11, "ldnt1d", Z_d, ZPR64>;

// Non-temporal contiguous loads (register + register)
defm LDNT1B_ZRR : sve_mem_cldnt_ss<0b00, "ldnt1b", Z_b, ZPR8, GPR64NoXZRshifted8>;
defm LDNT1H_ZRR : sve_mem_cldnt_ss<0b01, "ldnt1h", Z_h, ZPR16, GPR64NoXZRshifted16>;
defm LDNT1W_ZRR : sve_mem_cldnt_ss<0b10, "ldnt1w", Z_s, ZPR32, GPR64NoXZRshifted32>;
defm LDNT1D_ZRR : sve_mem_cldnt_ss<0b11, "ldnt1d", Z_d, ZPR64, GPR64NoXZRshifted64>;

// Contiguous stores (register + immediate)
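// e.g. st1b z0.b, p0, [x0, #1, mul vl]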
defm ST1B_IMM : sve_mem_cst_si<0b00, 0b00, "st1b", Z_b, ZPR8>;
defm ST1B_H_IMM : sve_mem_cst_si<0b00, 0b01, "st1b", Z_h, ZPR16>;
defm ST1B_S_IMM : sve_mem_cst_si<0b00, 0b10, "st1b", Z_s, ZPR32>;
defm ST1B_D_IMM : sve_mem_cst_si<0b00, 0b11, "st1b", Z_d, ZPR64>;
defm ST1H_IMM : sve_mem_cst_si<0b01, 0b01, "st1h", Z_h, ZPR16>;
defm ST1H_S_IMM : sve_mem_cst_si<0b01, 0b10, "st1h", Z_s, ZPR32>;
defm ST1H_D_IMM : sve_mem_cst_si<0b01, 0b11, "st1h", Z_d, ZPR64>;
defm ST1W_IMM : sve_mem_cst_si<0b10, 0b10, "st1w", Z_s, ZPR32>;
defm ST1W_D_IMM : sve_mem_cst_si<0b10, 0b11, "st1w", Z_d, ZPR64>;
defm ST1D_IMM : sve_mem_cst_si<0b11, 0b11, "st1d", Z_d, ZPR64>;

// Contiguous stores (register + register)
defm ST1B : sve_mem_cst_ss<0b0000, "st1b", Z_b, ZPR8, GPR64NoXZRshifted8>;
defm ST1B_H : sve_mem_cst_ss<0b0001, "st1b", Z_h, ZPR16, GPR64NoXZRshifted8>;
defm ST1B_S : sve_mem_cst_ss<0b0010, "st1b", Z_s, ZPR32, GPR64NoXZRshifted8>;
defm ST1B_D : sve_mem_cst_ss<0b0011, "st1b", Z_d, ZPR64, GPR64NoXZRshifted8>;
defm ST1H : sve_mem_cst_ss<0b0101, "st1h", Z_h, ZPR16, GPR64NoXZRshifted16>;
defm ST1H_S : sve_mem_cst_ss<0b0110, "st1h", Z_s, ZPR32, GPR64NoXZRshifted16>;
defm ST1H_D : sve_mem_cst_ss<0b0111, "st1h", Z_d, ZPR64, GPR64NoXZRshifted16>;
defm ST1W : sve_mem_cst_ss<0b1010, "st1w", Z_s, ZPR32, GPR64NoXZRshifted32>;
defm ST1W_D : sve_mem_cst_ss<0b1011, "st1w", Z_d, ZPR64, GPR64NoXZRshifted32>;
defm ST1D : sve_mem_cst_ss<0b1111, "st1d", Z_d, ZPR64, GPR64NoXZRshifted64>;

// Scatters using unscaled 32-bit offsets, e.g.
// st1h z0.s, p0, [x0, z0.s, uxtw]
// and unpacked:
// st1h z0.d, p0, [x0, z0.d, uxtw]
defm SST1B_D : sve_mem_sst_sv_32_unscaled<0b000, "st1b", Z_d, ZPR64, ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only>;
defm SST1B_S : sve_mem_sst_sv_32_unscaled<0b001, "st1b", Z_s, ZPR32, ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only>;
defm SST1H_D : sve_mem_sst_sv_32_unscaled<0b010, "st1h", Z_d, ZPR64, ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
defm SST1H_S : sve_mem_sst_sv_32_unscaled<0b011, "st1h", Z_s, ZPR32, ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
defm SST1W_D : sve_mem_sst_sv_32_unscaled<0b100, "st1w", Z_d, ZPR64, ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
defm SST1W : sve_mem_sst_sv_32_unscaled<0b101, "st1w", Z_s, ZPR32, ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
defm SST1D : sve_mem_sst_sv_32_unscaled<0b110, "st1d", Z_d, ZPR64, ZPR64ExtSXTW8, ZPR64ExtUXTW8>;

// Scatters using scaled 32-bit offsets, e.g.
// st1h z0.s, p0, [x0, z0.s, uxtw #1]
// and unpacked:
// st1h z0.d, p0, [x0, z0.d, uxtw #1]
defm SST1H_D : sve_mem_sst_sv_32_scaled<0b010, "st1h", Z_d, ZPR64, ZPR64ExtSXTW16, ZPR64ExtUXTW16>;
defm SST1H_S : sve_mem_sst_sv_32_scaled<0b011, "st1h", Z_s, ZPR32, ZPR32ExtSXTW16, ZPR32ExtUXTW16>;
defm SST1W_D : sve_mem_sst_sv_32_scaled<0b100, "st1w", Z_d, ZPR64, ZPR64ExtSXTW32, ZPR64ExtUXTW32>;
defm SST1W : sve_mem_sst_sv_32_scaled<0b101, "st1w", Z_s, ZPR32, ZPR32ExtSXTW32, ZPR32ExtUXTW32>;
defm SST1D : sve_mem_sst_sv_32_scaled<0b110, "st1d", Z_d, ZPR64, ZPR64ExtSXTW64, ZPR64ExtUXTW64>;

// Scatters using 32/64-bit pointers with offset, e.g.
// st1h z0.s, p0, [z0.s, #16]
// st1h z0.d, p0, [z0.d, #16]
defm SST1B_D : sve_mem_sst_vi_ptrs<0b000, "st1b", Z_d, ZPR64, imm0_31>;
defm SST1B_S : sve_mem_sst_vi_ptrs<0b001, "st1b", Z_s, ZPR32, imm0_31>;
defm SST1H_D : sve_mem_sst_vi_ptrs<0b010, "st1h", Z_d, ZPR64, uimm5s2>;
defm SST1H_S : sve_mem_sst_vi_ptrs<0b011, "st1h", Z_s, ZPR32, uimm5s2>;
defm SST1W_D : sve_mem_sst_vi_ptrs<0b100, "st1w", Z_d, ZPR64, uimm5s4>;
defm SST1W : sve_mem_sst_vi_ptrs<0b101, "st1w", Z_s, ZPR32, uimm5s4>;
defm SST1D : sve_mem_sst_vi_ptrs<0b110, "st1d", Z_d, ZPR64, uimm5s8>;

// Scatters using unscaled 64-bit offsets, e.g.
// st1h z0.d, p0, [x0, z0.d]
defm SST1B_D : sve_mem_sst_sv_64_unscaled<0b00, "st1b">;
defm SST1H_D : sve_mem_sst_sv_64_unscaled<0b01, "st1h">;
defm SST1W_D : sve_mem_sst_sv_64_unscaled<0b10, "st1w">;
defm SST1D : sve_mem_sst_sv_64_unscaled<0b11, "st1d">;

// Scatters using scaled 64-bit offsets, e.g.
// st1h z0.d, p0, [x0, z0.d, lsl #1]
defm SST1H_D_SCALED : sve_mem_sst_sv_64_scaled<0b01, "st1h", ZPR64ExtLSL16>;
defm SST1W_D_SCALED : sve_mem_sst_sv_64_scaled<0b10, "st1w", ZPR64ExtLSL32>;
defm SST1D_SCALED : sve_mem_sst_sv_64_scaled<0b11, "st1d", ZPR64ExtLSL64>;

// ST(2|3|4) structured stores (register + immediate)
defm ST2B_IMM : sve_mem_est_si<0b00, 0b01, ZZ_b, "st2b", simm4s2>;
defm ST3B_IMM : sve_mem_est_si<0b00, 0b10, ZZZ_b, "st3b", simm4s3>;
defm ST4B_IMM : sve_mem_est_si<0b00, 0b11, ZZZZ_b, "st4b", simm4s4>;
defm ST2H_IMM : sve_mem_est_si<0b01, 0b01, ZZ_h, "st2h", simm4s2>;
defm ST3H_IMM : sve_mem_est_si<0b01, 0b10, ZZZ_h, "st3h", simm4s3>;
defm ST4H_IMM : sve_mem_est_si<0b01, 0b11, ZZZZ_h, "st4h", simm4s4>;
defm ST2W_IMM : sve_mem_est_si<0b10, 0b01, ZZ_s, "st2w", simm4s2>;
defm ST3W_IMM : sve_mem_est_si<0b10, 0b10, ZZZ_s, "st3w", simm4s3>;
defm ST4W_IMM : sve_mem_est_si<0b10, 0b11, ZZZZ_s, "st4w", simm4s4>;
defm ST2D_IMM : sve_mem_est_si<0b11, 0b01, ZZ_d, "st2d", simm4s2>;
defm ST3D_IMM : sve_mem_est_si<0b11, 0b10, ZZZ_d, "st3d", simm4s3>;
defm ST4D_IMM : sve_mem_est_si<0b11, 0b11, ZZZZ_d, "st4d", simm4s4>;

// ST(2|3|4) structured stores (register + register)
def ST2B : sve_mem_est_ss<0b00, 0b01, ZZ_b, "st2b", GPR64NoXZRshifted8>;
def ST3B : sve_mem_est_ss<0b00, 0b10, ZZZ_b, "st3b", GPR64NoXZRshifted8>;
def ST4B : sve_mem_est_ss<0b00, 0b11, ZZZZ_b, "st4b", GPR64NoXZRshifted8>;
def ST2H : sve_mem_est_ss<0b01, 0b01, ZZ_h, "st2h", GPR64NoXZRshifted16>;
def ST3H : sve_mem_est_ss<0b01, 0b10, ZZZ_h, "st3h", GPR64NoXZRshifted16>;
def ST4H : sve_mem_est_ss<0b01, 0b11, ZZZZ_h, "st4h", GPR64NoXZRshifted16>;
def ST2W : sve_mem_est_ss<0b10, 0b01, ZZ_s, "st2w", GPR64NoXZRshifted32>;
def ST3W : sve_mem_est_ss<0b10, 0b10, ZZZ_s, "st3w", GPR64NoXZRshifted32>;
def ST4W : sve_mem_est_ss<0b10, 0b11, ZZZZ_s, "st4w", GPR64NoXZRshifted32>;
def ST2D : sve_mem_est_ss<0b11, 0b01, ZZ_d, "st2d", GPR64NoXZRshifted64>;
def ST3D : sve_mem_est_ss<0b11, 0b10, ZZZ_d, "st3d", GPR64NoXZRshifted64>;
def ST4D : sve_mem_est_ss<0b11, 0b11, ZZZZ_d, "st4d", GPR64NoXZRshifted64>;

// Non-temporal contiguous stores (register + immediate)
defm STNT1B_ZRI : sve_mem_cstnt_si<0b00, "stnt1b", Z_b, ZPR8>;
defm STNT1H_ZRI : sve_mem_cstnt_si<0b01, "stnt1h", Z_h, ZPR16>;
defm STNT1W_ZRI : sve_mem_cstnt_si<0b10, "stnt1w", Z_s, ZPR32>;
defm STNT1D_ZRI : sve_mem_cstnt_si<0b11, "stnt1d", Z_d, ZPR64>;

// Non-temporal contiguous stores (register + register)
defm STNT1B_ZRR : sve_mem_cstnt_ss<0b00, "stnt1b", Z_b, ZPR8, GPR64NoXZRshifted8>;
defm STNT1H_ZRR : sve_mem_cstnt_ss<0b01, "stnt1h", Z_h, ZPR16, GPR64NoXZRshifted16>;
defm STNT1W_ZRR : sve_mem_cstnt_ss<0b10, "stnt1w", Z_s, ZPR32, GPR64NoXZRshifted32>;
defm STNT1D_ZRR : sve_mem_cstnt_ss<0b11, "stnt1d", Z_d, ZPR64, GPR64NoXZRshifted64>;

// Fill/Spill
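// e.g. str z0, [x0, #4, mul vl] and ldr p0, [x0, #7, mul vl]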
defm LDR_ZXI : sve_mem_z_fill<"ldr">;
defm LDR_PXI : sve_mem_p_fill<"ldr">;
defm STR_ZXI : sve_mem_z_spill<"str">;
defm STR_PXI : sve_mem_p_spill<"str">;

// Contiguous prefetch (register + immediate)
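// e.g. prfb pldl1keep, p0, [x0, #1, mul vl]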
defm PRFB_PRI : sve_mem_prfm_si<0b00, "prfb">;
defm PRFH_PRI : sve_mem_prfm_si<0b01, "prfh">;
defm PRFW_PRI : sve_mem_prfm_si<0b10, "prfw">;
defm PRFD_PRI : sve_mem_prfm_si<0b11, "prfd">;

// Contiguous prefetch (register + register)
def PRFB_PRR : sve_mem_prfm_ss<0b001, "prfb", GPR64NoXZRshifted8>;
def PRFH_PRR : sve_mem_prfm_ss<0b011, "prfh", GPR64NoXZRshifted16>;
def PRFS_PRR : sve_mem_prfm_ss<0b101, "prfw", GPR64NoXZRshifted32>;
def PRFD_PRR : sve_mem_prfm_ss<0b111, "prfd", GPR64NoXZRshifted64>;

// Gather prefetch using scaled 32-bit offsets, e.g.
// prfh pldl1keep, p0, [x0, z0.s, uxtw #1]
defm PRFB_S : sve_mem_32b_prfm_sv_scaled<0b00, "prfb", ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only>;
defm PRFH_S : sve_mem_32b_prfm_sv_scaled<0b01, "prfh", ZPR32ExtSXTW16, ZPR32ExtUXTW16>;
defm PRFW_S : sve_mem_32b_prfm_sv_scaled<0b10, "prfw", ZPR32ExtSXTW32, ZPR32ExtUXTW32>;
defm PRFD_S : sve_mem_32b_prfm_sv_scaled<0b11, "prfd", ZPR32ExtSXTW64, ZPR32ExtUXTW64>;

// Gather prefetch using unpacked, scaled 32-bit offsets, e.g.
// prfh pldl1keep, p0, [x0, z0.d, uxtw #1]
defm PRFB_D : sve_mem_64b_prfm_sv_ext_scaled<0b00, "prfb", ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only>;
defm PRFH_D : sve_mem_64b_prfm_sv_ext_scaled<0b01, "prfh", ZPR64ExtSXTW16, ZPR64ExtUXTW16>;
defm PRFW_D : sve_mem_64b_prfm_sv_ext_scaled<0b10, "prfw", ZPR64ExtSXTW32, ZPR64ExtUXTW32>;
defm PRFD_D : sve_mem_64b_prfm_sv_ext_scaled<0b11, "prfd", ZPR64ExtSXTW64, ZPR64ExtUXTW64>;

// Gather prefetch using scaled 64-bit offsets, e.g.
// prfh pldl1keep, p0, [x0, z0.d, lsl #1]
defm PRFB_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b00, "prfb", ZPR64ExtLSL8>;
defm PRFH_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b01, "prfh", ZPR64ExtLSL16>;
defm PRFW_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b10, "prfw", ZPR64ExtLSL32>;
defm PRFD_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b11, "prfd", ZPR64ExtLSL64>;

// Gather prefetch using 32/64-bit pointers with offset, e.g.
// prfh pldl1keep, p0, [z0.s, #16]
// prfh pldl1keep, p0, [z0.d, #16]
defm PRFB_S_PZI : sve_mem_32b_prfm_vi<0b00, "prfb", imm0_31>;
defm PRFH_S_PZI : sve_mem_32b_prfm_vi<0b01, "prfh", uimm5s2>;
defm PRFW_S_PZI : sve_mem_32b_prfm_vi<0b10, "prfw", uimm5s4>;
defm PRFD_S_PZI : sve_mem_32b_prfm_vi<0b11, "prfd", uimm5s8>;

defm PRFB_D_PZI : sve_mem_64b_prfm_vi<0b00, "prfb", imm0_31>;
defm PRFH_D_PZI : sve_mem_64b_prfm_vi<0b01, "prfh", uimm5s2>;
defm PRFW_D_PZI : sve_mem_64b_prfm_vi<0b10, "prfw", uimm5s4>;
defm PRFD_D_PZI : sve_mem_64b_prfm_vi<0b11, "prfd", uimm5s8>;

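// Vector address calculation, e.g.
//   adr z0.s, [z0.s, z0.s]
//   adr z0.d, [z0.d, z0.d, lsl #2]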
defm ADR_SXTW_ZZZ_D : sve_int_bin_cons_misc_0_a_sxtw<0b00, "adr">;
defm ADR_UXTW_ZZZ_D : sve_int_bin_cons_misc_0_a_uxtw<0b01, "adr">;
defm ADR_LSL_ZZZ_S : sve_int_bin_cons_misc_0_a_32_lsl<0b10, "adr">;
defm ADR_LSL_ZZZ_D : sve_int_bin_cons_misc_0_a_64_lsl<0b11, "adr">;

defm TBL_ZZZ : sve_int_perm_tbl<"tbl">;

defm ZIP1_ZZZ : sve_int_perm_bin_perm_zz<0b000, "zip1">;
defm ZIP2_ZZZ : sve_int_perm_bin_perm_zz<0b001, "zip2">;
defm UZP1_ZZZ : sve_int_perm_bin_perm_zz<0b010, "uzp1">;
defm UZP2_ZZZ : sve_int_perm_bin_perm_zz<0b011, "uzp2">;
defm TRN1_ZZZ : sve_int_perm_bin_perm_zz<0b100, "trn1">;
defm TRN2_ZZZ : sve_int_perm_bin_perm_zz<0b101, "trn2">;

defm ZIP1_PPP : sve_int_perm_bin_perm_pp<0b000, "zip1">;
defm ZIP2_PPP : sve_int_perm_bin_perm_pp<0b001, "zip2">;
defm UZP1_PPP : sve_int_perm_bin_perm_pp<0b010, "uzp1">;
defm UZP2_PPP : sve_int_perm_bin_perm_pp<0b011, "uzp2">;
defm TRN1_PPP : sve_int_perm_bin_perm_pp<0b100, "trn1">;
defm TRN2_PPP : sve_int_perm_bin_perm_pp<0b101, "trn2">;

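// Integer compares producing a predicate result, e.g.
//   cmpgt p0.s, p0/z, z0.s, z1.s
// Wide variants compare against a vector of 64-bit elements, e.g.
//   cmpgt p0.s, p0/z, z0.s, z1.d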
defm CMPHS_PPzZZ : sve_int_cmp_0<0b000, "cmphs">;
defm CMPHI_PPzZZ : sve_int_cmp_0<0b001, "cmphi">;
defm CMPGE_PPzZZ : sve_int_cmp_0<0b100, "cmpge">;
defm CMPGT_PPzZZ : sve_int_cmp_0<0b101, "cmpgt">;
defm CMPEQ_PPzZZ : sve_int_cmp_0<0b110, "cmpeq">;
defm CMPNE_PPzZZ : sve_int_cmp_0<0b111, "cmpne">;

defm CMPEQ_WIDE_PPzZZ : sve_int_cmp_0_wide<0b010, "cmpeq">;
defm CMPNE_WIDE_PPzZZ : sve_int_cmp_0_wide<0b011, "cmpne">;
defm CMPGE_WIDE_PPzZZ : sve_int_cmp_1_wide<0b000, "cmpge">;
defm CMPGT_WIDE_PPzZZ : sve_int_cmp_1_wide<0b001, "cmpgt">;
defm CMPLT_WIDE_PPzZZ : sve_int_cmp_1_wide<0b010, "cmplt">;
defm CMPLE_WIDE_PPzZZ : sve_int_cmp_1_wide<0b011, "cmple">;
defm CMPHS_WIDE_PPzZZ : sve_int_cmp_1_wide<0b100, "cmphs">;
defm CMPHI_WIDE_PPzZZ : sve_int_cmp_1_wide<0b101, "cmphi">;
defm CMPLO_WIDE_PPzZZ : sve_int_cmp_1_wide<0b110, "cmplo">;
defm CMPLS_WIDE_PPzZZ : sve_int_cmp_1_wide<0b111, "cmpls">;

defm CMPGE_PPzZI : sve_int_scmp_vi<0b000, "cmpge">;
defm CMPGT_PPzZI : sve_int_scmp_vi<0b001, "cmpgt">;
defm CMPLT_PPzZI : sve_int_scmp_vi<0b010, "cmplt">;
defm CMPLE_PPzZI : sve_int_scmp_vi<0b011, "cmple">;
defm CMPEQ_PPzZI : sve_int_scmp_vi<0b100, "cmpeq">;
defm CMPNE_PPzZI : sve_int_scmp_vi<0b101, "cmpne">;
defm CMPHS_PPzZI : sve_int_ucmp_vi<0b00, "cmphs">;
defm CMPHI_PPzZI : sve_int_ucmp_vi<0b01, "cmphi">;
defm CMPLO_PPzZI : sve_int_ucmp_vi<0b10, "cmplo">;
defm CMPLS_PPzZI : sve_int_ucmp_vi<0b11, "cmpls">;

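// FP element compares, e.g.
//   fcmge p0.h, p0/z, z0.h, z1.h
// and compares against #0.0, e.g.
//   fcmle p0.h, p0/z, z0.h, #0.0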
defm FCMGE_PPzZZ : sve_fp_3op_p_pd<0b000, "fcmge">;
defm FCMGT_PPzZZ : sve_fp_3op_p_pd<0b001, "fcmgt">;
defm FCMEQ_PPzZZ : sve_fp_3op_p_pd<0b010, "fcmeq">;
defm FCMNE_PPzZZ : sve_fp_3op_p_pd<0b011, "fcmne">;
defm FCMUO_PPzZZ : sve_fp_3op_p_pd<0b100, "fcmuo">;
defm FACGE_PPzZZ : sve_fp_3op_p_pd<0b101, "facge">;
defm FACGT_PPzZZ : sve_fp_3op_p_pd<0b111, "facgt">;

defm FCMGE_PPzZ0 : sve_fp_2op_p_pd<0b000, "fcmge">;
defm FCMGT_PPzZ0 : sve_fp_2op_p_pd<0b001, "fcmgt">;
defm FCMLT_PPzZ0 : sve_fp_2op_p_pd<0b010, "fcmlt">;
defm FCMLE_PPzZ0 : sve_fp_2op_p_pd<0b011, "fcmle">;
defm FCMEQ_PPzZ0 : sve_fp_2op_p_pd<0b100, "fcmeq">;
defm FCMNE_PPzZ0 : sve_fp_2op_p_pd<0b110, "fcmne">;

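// e.g. rdvl x0, #1 reads the vector length in bytes;
//      addvl sp, sp, #-1 adjusts a pointer by a multiple of VL.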
def RDVLI_XI : sve_int_read_vl_a<0b0, 0b11111, "rdvl">;
def ADDVL_XXI : sve_int_arith_vl<0b0, "addvl">;
def ADDPL_XXI : sve_int_arith_vl<0b1, "addpl">;

defm CNTB_XPiI : sve_int_count<0b000, "cntb">;
defm CNTH_XPiI : sve_int_count<0b010, "cnth">;
defm CNTW_XPiI : sve_int_count<0b100, "cntw">;
defm CNTD_XPiI : sve_int_count<0b110, "cntd">;
defm CNTP_XPP : sve_int_pcount_pred<0b0000, "cntp">;

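// e.g. incb x0, all, mul #4 advances x0 by four times the vector length in bytes.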
defm INCB_XPiI : sve_int_pred_pattern_a<0b000, "incb">;
|
|
|
|
defm DECB_XPiI : sve_int_pred_pattern_a<0b001, "decb">;
|
|
|
|
defm INCH_XPiI : sve_int_pred_pattern_a<0b010, "inch">;
|
|
|
|
defm DECH_XPiI : sve_int_pred_pattern_a<0b011, "dech">;
|
|
|
|
defm INCW_XPiI : sve_int_pred_pattern_a<0b100, "incw">;
|
|
|
|
defm DECW_XPiI : sve_int_pred_pattern_a<0b101, "decw">;
|
|
|
|
defm INCD_XPiI : sve_int_pred_pattern_a<0b110, "incd">;
|
|
|
|
defm DECD_XPiI : sve_int_pred_pattern_a<0b111, "decd">;
|
|
|
|
|
2018-07-02 15:34:52 +08:00
|
|
|
defm SQINCB_XPiWdI : sve_int_pred_pattern_b_s32<0b00000, "sqincb">;
defm UQINCB_WPiI : sve_int_pred_pattern_b_u32<0b00001, "uqincb">;
defm SQDECB_XPiWdI : sve_int_pred_pattern_b_s32<0b00010, "sqdecb">;
defm UQDECB_WPiI : sve_int_pred_pattern_b_u32<0b00011, "uqdecb">;
defm SQINCB_XPiI : sve_int_pred_pattern_b_x64<0b00100, "sqincb">;
defm UQINCB_XPiI : sve_int_pred_pattern_b_x64<0b00101, "uqincb">;
defm SQDECB_XPiI : sve_int_pred_pattern_b_x64<0b00110, "sqdecb">;
defm UQDECB_XPiI : sve_int_pred_pattern_b_x64<0b00111, "uqdecb">;

defm SQINCH_XPiWdI : sve_int_pred_pattern_b_s32<0b01000, "sqinch">;
defm UQINCH_WPiI : sve_int_pred_pattern_b_u32<0b01001, "uqinch">;
defm SQDECH_XPiWdI : sve_int_pred_pattern_b_s32<0b01010, "sqdech">;
defm UQDECH_WPiI : sve_int_pred_pattern_b_u32<0b01011, "uqdech">;
defm SQINCH_XPiI : sve_int_pred_pattern_b_x64<0b01100, "sqinch">;
defm UQINCH_XPiI : sve_int_pred_pattern_b_x64<0b01101, "uqinch">;
defm SQDECH_XPiI : sve_int_pred_pattern_b_x64<0b01110, "sqdech">;
defm UQDECH_XPiI : sve_int_pred_pattern_b_x64<0b01111, "uqdech">;

defm SQINCW_XPiWdI : sve_int_pred_pattern_b_s32<0b10000, "sqincw">;
defm UQINCW_WPiI : sve_int_pred_pattern_b_u32<0b10001, "uqincw">;
defm SQDECW_XPiWdI : sve_int_pred_pattern_b_s32<0b10010, "sqdecw">;
defm UQDECW_WPiI : sve_int_pred_pattern_b_u32<0b10011, "uqdecw">;
defm SQINCW_XPiI : sve_int_pred_pattern_b_x64<0b10100, "sqincw">;
defm UQINCW_XPiI : sve_int_pred_pattern_b_x64<0b10101, "uqincw">;
defm SQDECW_XPiI : sve_int_pred_pattern_b_x64<0b10110, "sqdecw">;
defm UQDECW_XPiI : sve_int_pred_pattern_b_x64<0b10111, "uqdecw">;

defm SQINCD_XPiWdI : sve_int_pred_pattern_b_s32<0b11000, "sqincd">;
defm UQINCD_WPiI : sve_int_pred_pattern_b_u32<0b11001, "uqincd">;
defm SQDECD_XPiWdI : sve_int_pred_pattern_b_s32<0b11010, "sqdecd">;
defm UQDECD_WPiI : sve_int_pred_pattern_b_u32<0b11011, "uqdecd">;
defm SQINCD_XPiI : sve_int_pred_pattern_b_x64<0b11100, "sqincd">;
defm UQINCD_XPiI : sve_int_pred_pattern_b_x64<0b11101, "uqincd">;
defm SQDECD_XPiI : sve_int_pred_pattern_b_x64<0b11110, "sqdecd">;
defm UQDECD_XPiI : sve_int_pred_pattern_b_x64<0b11111, "uqdecd">;

// Saturating and non-saturating increment/decrement of a vector by a
// multiple of the predicate-pattern element count,
// e.g. incw z0.s, all, mul #4.
defm SQINCH_ZPiI : sve_int_countvlv<0b01000, "sqinch", ZPR16>;
defm UQINCH_ZPiI : sve_int_countvlv<0b01001, "uqinch", ZPR16>;
defm SQDECH_ZPiI : sve_int_countvlv<0b01010, "sqdech", ZPR16>;
defm UQDECH_ZPiI : sve_int_countvlv<0b01011, "uqdech", ZPR16>;
defm INCH_ZPiI : sve_int_countvlv<0b01100, "inch", ZPR16>;
defm DECH_ZPiI : sve_int_countvlv<0b01101, "dech", ZPR16>;
defm SQINCW_ZPiI : sve_int_countvlv<0b10000, "sqincw", ZPR32>;
defm UQINCW_ZPiI : sve_int_countvlv<0b10001, "uqincw", ZPR32>;
defm SQDECW_ZPiI : sve_int_countvlv<0b10010, "sqdecw", ZPR32>;
defm UQDECW_ZPiI : sve_int_countvlv<0b10011, "uqdecw", ZPR32>;
defm INCW_ZPiI : sve_int_countvlv<0b10100, "incw", ZPR32>;
defm DECW_ZPiI : sve_int_countvlv<0b10101, "decw", ZPR32>;
defm SQINCD_ZPiI : sve_int_countvlv<0b11000, "sqincd", ZPR64>;
defm UQINCD_ZPiI : sve_int_countvlv<0b11001, "uqincd", ZPR64>;
defm SQDECD_ZPiI : sve_int_countvlv<0b11010, "sqdecd", ZPR64>;
defm UQDECD_ZPiI : sve_int_countvlv<0b11011, "uqdecd", ZPR64>;
defm INCD_ZPiI : sve_int_countvlv<0b11100, "incd", ZPR64>;
defm DECD_ZPiI : sve_int_countvlv<0b11101, "decd", ZPR64>;

// Increment/decrement a scalar by the number of active predicate elements,
// with signed/unsigned saturating forms,
// e.g. incp x0, p0.h or uqincp w0, p0.h.
defm SQINCP_XPWd : sve_int_count_r_s32<0b00000, "sqincp">;
defm SQINCP_XP : sve_int_count_r_x64<0b00010, "sqincp">;
defm UQINCP_WP : sve_int_count_r_u32<0b00100, "uqincp">;
defm UQINCP_XP : sve_int_count_r_x64<0b00110, "uqincp">;
defm SQDECP_XPWd : sve_int_count_r_s32<0b01000, "sqdecp">;
defm SQDECP_XP : sve_int_count_r_x64<0b01010, "sqdecp">;
defm UQDECP_WP : sve_int_count_r_u32<0b01100, "uqdecp">;
defm UQDECP_XP : sve_int_count_r_x64<0b01110, "uqdecp">;
defm INCP_XP : sve_int_count_r_x64<0b10000, "incp">;
defm DECP_XP : sve_int_count_r_x64<0b10100, "decp">;
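
// Increment/decrement a vector by the number of active predicate elements,
// e.g. incp z0.h, p0.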
defm SQINCP_ZP : sve_int_count_v<0b00000, "sqincp">;
defm UQINCP_ZP : sve_int_count_v<0b00100, "uqincp">;
defm SQDECP_ZP : sve_int_count_v<0b01000, "sqdecp">;
defm UQDECP_ZP : sve_int_count_v<0b01100, "uqdecp">;
defm INCP_ZP : sve_int_count_v<0b10000, "incp">;
defm DECP_ZP : sve_int_count_v<0b10100, "decp">;
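
// INDEX: vector of a linear series,
// e.g. index z0.s, #0, #1 or index z0.d, x0, x1.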
defm INDEX_RR : sve_int_index_rr<"index">;
defm INDEX_IR : sve_int_index_ir<"index">;
defm INDEX_RI : sve_int_index_ri<"index">;
defm INDEX_II : sve_int_index_ii<"index">;

// Shift instructions: LSL, LSR, ASR, plus the reversed forms LSLR, LSRR,
// ASRR and ASRD (arithmetic shift right for divide), in immediate, vector
// and wide-element vector variants.
// Unpredicated shifts
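// e.g. asr z0.h, z1.h, #1 (all lanes of z1 shifted by #1, stored in z0)
//      lsl z0.h, z1.h, z2.d (shift by wide elements of z2)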
defm ASR_ZZI : sve_int_bin_cons_shift_imm_right<0b00, "asr">;
defm LSR_ZZI : sve_int_bin_cons_shift_imm_right<0b01, "lsr">;
defm LSL_ZZI : sve_int_bin_cons_shift_imm_left< 0b11, "lsl">;

defm ASR_WIDE_ZZZ : sve_int_bin_cons_shift_wide<0b00, "asr">;
defm LSR_WIDE_ZZZ : sve_int_bin_cons_shift_wide<0b01, "lsr">;
defm LSL_WIDE_ZZZ : sve_int_bin_cons_shift_wide<0b11, "lsl">;

// Predicated shifts
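// e.g. asr z0.h, p0/m, z0.h, #1 (active lanes of z0 shifted by #1)
//      lslr z0.h, p0/m, z0.h, z1.h (active lanes of z1 shifted by z0)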
defm ASR_ZPmI : sve_int_bin_pred_shift_imm_right<0b000, "asr">;
defm LSR_ZPmI : sve_int_bin_pred_shift_imm_right<0b001, "lsr">;
defm LSL_ZPmI : sve_int_bin_pred_shift_imm_left< 0b011, "lsl">;
defm ASRD_ZPmI : sve_int_bin_pred_shift_imm_right<0b100, "asrd">;

defm ASR_ZPmZ : sve_int_bin_pred_shift<0b000, "asr">;
defm LSR_ZPmZ : sve_int_bin_pred_shift<0b001, "lsr">;
defm LSL_ZPmZ : sve_int_bin_pred_shift<0b011, "lsl">;
defm ASRR_ZPmZ : sve_int_bin_pred_shift<0b100, "asrr">;
defm LSRR_ZPmZ : sve_int_bin_pred_shift<0b101, "lsrr">;
defm LSLR_ZPmZ : sve_int_bin_pred_shift<0b111, "lslr">;

defm ASR_WIDE_ZPmZ : sve_int_bin_pred_shift_wide<0b000, "asr">;
defm LSR_WIDE_ZPmZ : sve_int_bin_pred_shift_wide<0b001, "lsr">;
defm LSL_WIDE_ZPmZ : sve_int_bin_pred_shift_wide<0b011, "lsl">;

// Floating-point conversions: fcvt (change precision), scvtf/ucvtf
// (integer to FP) and fcvtzs/fcvtzu (FP to integer, round toward zero),
// e.g. fcvt z0.h, p0/m, z0.s or fcvtzs z0.s, p0/m, z0.h.
def FCVT_ZPmZ_StoH : sve_fp_2op_p_zd<0b1001000, "fcvt", ZPR32, ZPR16>;
def FCVT_ZPmZ_HtoS : sve_fp_2op_p_zd<0b1001001, "fcvt", ZPR16, ZPR32>;
def SCVTF_ZPmZ_HtoH : sve_fp_2op_p_zd<0b0110010, "scvtf", ZPR16, ZPR16>;
def SCVTF_ZPmZ_StoS : sve_fp_2op_p_zd<0b1010100, "scvtf", ZPR32, ZPR32>;
def UCVTF_ZPmZ_StoS : sve_fp_2op_p_zd<0b1010101, "ucvtf", ZPR32, ZPR32>;
def UCVTF_ZPmZ_HtoH : sve_fp_2op_p_zd<0b0110011, "ucvtf", ZPR16, ZPR16>;
def FCVTZS_ZPmZ_HtoH : sve_fp_2op_p_zd<0b0111010, "fcvtzs", ZPR16, ZPR16>;
def FCVTZS_ZPmZ_StoS : sve_fp_2op_p_zd<0b1011100, "fcvtzs", ZPR32, ZPR32>;
def FCVTZU_ZPmZ_HtoH : sve_fp_2op_p_zd<0b0111011, "fcvtzu", ZPR16, ZPR16>;
def FCVTZU_ZPmZ_StoS : sve_fp_2op_p_zd<0b1011101, "fcvtzu", ZPR32, ZPR32>;
def FCVT_ZPmZ_DtoH : sve_fp_2op_p_zd<0b1101000, "fcvt", ZPR64, ZPR16>;
def FCVT_ZPmZ_HtoD : sve_fp_2op_p_zd<0b1101001, "fcvt", ZPR16, ZPR64>;
def FCVT_ZPmZ_DtoS : sve_fp_2op_p_zd<0b1101010, "fcvt", ZPR64, ZPR32>;
def FCVT_ZPmZ_StoD : sve_fp_2op_p_zd<0b1101011, "fcvt", ZPR32, ZPR64>;
def SCVTF_ZPmZ_StoD : sve_fp_2op_p_zd<0b1110000, "scvtf", ZPR32, ZPR64>;
def UCVTF_ZPmZ_StoD : sve_fp_2op_p_zd<0b1110001, "ucvtf", ZPR32, ZPR64>;
def UCVTF_ZPmZ_StoH : sve_fp_2op_p_zd<0b0110101, "ucvtf", ZPR32, ZPR16>;
def SCVTF_ZPmZ_DtoS : sve_fp_2op_p_zd<0b1110100, "scvtf", ZPR64, ZPR32>;
def SCVTF_ZPmZ_StoH : sve_fp_2op_p_zd<0b0110100, "scvtf", ZPR32, ZPR16>;
def SCVTF_ZPmZ_DtoH : sve_fp_2op_p_zd<0b0110110, "scvtf", ZPR64, ZPR16>;
def UCVTF_ZPmZ_DtoS : sve_fp_2op_p_zd<0b1110101, "ucvtf", ZPR64, ZPR32>;
def UCVTF_ZPmZ_DtoH : sve_fp_2op_p_zd<0b0110111, "ucvtf", ZPR64, ZPR16>;
def SCVTF_ZPmZ_DtoD : sve_fp_2op_p_zd<0b1110110, "scvtf", ZPR64, ZPR64>;
def UCVTF_ZPmZ_DtoD : sve_fp_2op_p_zd<0b1110111, "ucvtf", ZPR64, ZPR64>;
def FCVTZS_ZPmZ_DtoS : sve_fp_2op_p_zd<0b1111000, "fcvtzs", ZPR64, ZPR32>;
def FCVTZU_ZPmZ_DtoS : sve_fp_2op_p_zd<0b1111001, "fcvtzu", ZPR64, ZPR32>;
def FCVTZS_ZPmZ_StoD : sve_fp_2op_p_zd<0b1111100, "fcvtzs", ZPR32, ZPR64>;
def FCVTZS_ZPmZ_HtoS : sve_fp_2op_p_zd<0b0111100, "fcvtzs", ZPR16, ZPR32>;
def FCVTZS_ZPmZ_HtoD : sve_fp_2op_p_zd<0b0111110, "fcvtzs", ZPR16, ZPR64>;
def FCVTZU_ZPmZ_HtoS : sve_fp_2op_p_zd<0b0111101, "fcvtzu", ZPR16, ZPR32>;
def FCVTZU_ZPmZ_HtoD : sve_fp_2op_p_zd<0b0111111, "fcvtzu", ZPR16, ZPR64>;
def FCVTZU_ZPmZ_StoD : sve_fp_2op_p_zd<0b1111101, "fcvtzu", ZPR32, ZPR64>;
def FCVTZS_ZPmZ_DtoD : sve_fp_2op_p_zd<0b1111110, "fcvtzs", ZPR64, ZPR64>;
def FCVTZU_ZPmZ_DtoD : sve_fp_2op_p_zd<0b1111111, "fcvtzu", ZPR64, ZPR64>;
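
// Floating-point round to integral, reciprocal exponent and square root
// (predicated), e.g. frintn z0.s, p0/m, z1.s.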
defm FRINTN_ZPmZ : sve_fp_2op_p_zd_HSD<0b00000, "frintn">;
defm FRINTP_ZPmZ : sve_fp_2op_p_zd_HSD<0b00001, "frintp">;
defm FRINTM_ZPmZ : sve_fp_2op_p_zd_HSD<0b00010, "frintm">;
defm FRINTZ_ZPmZ : sve_fp_2op_p_zd_HSD<0b00011, "frintz">;
defm FRINTA_ZPmZ : sve_fp_2op_p_zd_HSD<0b00100, "frinta">;
defm FRINTX_ZPmZ : sve_fp_2op_p_zd_HSD<0b00110, "frintx">;
defm FRINTI_ZPmZ : sve_fp_2op_p_zd_HSD<0b00111, "frinti">;
defm FRECPX_ZPmZ : sve_fp_2op_p_zd_HSD<0b01100, "frecpx">;
defm FSQRT_ZPmZ : sve_fp_2op_p_zd_HSD<0b01101, "fsqrt">;

// InstAliases
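// MOV, MOVS, NOT and NOTS are assembled as aliases of the ORR/AND/EOR/SEL
// forms below with repeated source operands.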

def : InstAlias<"mov $Zd, $Zn",
                (ORR_ZZZ ZPR64:$Zd, ZPR64:$Zn, ZPR64:$Zn), 1>;

def : InstAlias<"mov $Pd, $Pg/m, $Pn",
                (SEL_PPPP PPR8:$Pd, PPRAny:$Pg, PPR8:$Pn, PPR8:$Pd), 1>;

// Aliases built from predicate logical operations with repeated operands,
// e.g. orr p0.b, p1/z, p1.b, p1.b => mov p0.b, p1.b
//      eor p0.b, p1/z, p2.b, p1.b => not p0.b, p1/z, p2.b
def : InstAlias<"mov $Pd, $Pn",
|
|
|
|
(ORR_PPzPP PPR8:$Pd, PPR8:$Pn, PPR8:$Pn, PPR8:$Pn), 1>;
|
|
|
|
def : InstAlias<"mov $Pd, $Pg/z, $Pn",
|
|
|
|
(AND_PPzPP PPR8:$Pd, PPRAny:$Pg, PPR8:$Pn, PPR8:$Pn), 1>;
|
|
|
|
|
|
|
|
def : InstAlias<"movs $Pd, $Pn",
|
|
|
|
(ORRS_PPzPP PPR8:$Pd, PPR8:$Pn, PPR8:$Pn, PPR8:$Pn), 1>;
|
|
|
|
def : InstAlias<"movs $Pd, $Pg/z, $Pn",
|
|
|
|
(ANDS_PPzPP PPR8:$Pd, PPRAny:$Pg, PPR8:$Pn, PPR8:$Pn), 1>;
|
|
|
|
|
|
|
|
def : InstAlias<"not $Pd, $Pg/z, $Pn",
|
|
|
|
(EOR_PPzPP PPR8:$Pd, PPRAny:$Pg, PPR8:$Pn, PPRAny:$Pg), 1>;
|
|
|
|
|
|
|
|
def : InstAlias<"nots $Pd, $Pg/z, $Pn",
|
|
|
|
(EORS_PPzPP PPR8:$Pd, PPRAny:$Pg, PPR8:$Pn, PPRAny:$Pg), 1>;

// Integer compare aliases with the source operands reversed,
// e.g. cmple p0.s, p0/z, z0.s, z1.s => cmpge p0.s, p0/z, z1.s, z0.s
//      cmplo p0.s, p0/z, z0.s, z1.s => cmphi p0.s, p0/z, z1.s, z0.s
def : InstAlias<"cmple $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(CMPGE_PPzZZ_B PPR8:$Zd, PPR3bAny:$Pg, ZPR8:$Zn, ZPR8:$Zm), 0>;
|
|
|
|
def : InstAlias<"cmple $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(CMPGE_PPzZZ_H PPR16:$Zd, PPR3bAny:$Pg, ZPR16:$Zn, ZPR16:$Zm), 0>;
|
|
|
|
def : InstAlias<"cmple $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(CMPGE_PPzZZ_S PPR32:$Zd, PPR3bAny:$Pg, ZPR32:$Zn, ZPR32:$Zm), 0>;
|
|
|
|
def : InstAlias<"cmple $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(CMPGE_PPzZZ_D PPR64:$Zd, PPR3bAny:$Pg, ZPR64:$Zn, ZPR64:$Zm), 0>;
|
|
|
|
|
|
|
|
def : InstAlias<"cmplo $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(CMPHI_PPzZZ_B PPR8:$Zd, PPR3bAny:$Pg, ZPR8:$Zn, ZPR8:$Zm), 0>;
|
|
|
|
def : InstAlias<"cmplo $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(CMPHI_PPzZZ_H PPR16:$Zd, PPR3bAny:$Pg, ZPR16:$Zn, ZPR16:$Zm), 0>;
|
|
|
|
def : InstAlias<"cmplo $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(CMPHI_PPzZZ_S PPR32:$Zd, PPR3bAny:$Pg, ZPR32:$Zn, ZPR32:$Zm), 0>;
|
|
|
|
def : InstAlias<"cmplo $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(CMPHI_PPzZZ_D PPR64:$Zd, PPR3bAny:$Pg, ZPR64:$Zn, ZPR64:$Zm), 0>;
|
|
|
|
|
|
|
|
def : InstAlias<"cmpls $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(CMPHS_PPzZZ_B PPR8:$Zd, PPR3bAny:$Pg, ZPR8:$Zn, ZPR8:$Zm), 0>;
|
|
|
|
def : InstAlias<"cmpls $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(CMPHS_PPzZZ_H PPR16:$Zd, PPR3bAny:$Pg, ZPR16:$Zn, ZPR16:$Zm), 0>;
|
|
|
|
def : InstAlias<"cmpls $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(CMPHS_PPzZZ_S PPR32:$Zd, PPR3bAny:$Pg, ZPR32:$Zn, ZPR32:$Zm), 0>;
|
|
|
|
def : InstAlias<"cmpls $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(CMPHS_PPzZZ_D PPR64:$Zd, PPR3bAny:$Pg, ZPR64:$Zn, ZPR64:$Zm), 0>;
|
|
|
|
|
|
|
|
def : InstAlias<"cmplt $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(CMPGT_PPzZZ_B PPR8:$Zd, PPR3bAny:$Pg, ZPR8:$Zn, ZPR8:$Zm), 0>;
|
|
|
|
def : InstAlias<"cmplt $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(CMPGT_PPzZZ_H PPR16:$Zd, PPR3bAny:$Pg, ZPR16:$Zn, ZPR16:$Zm), 0>;
|
|
|
|
def : InstAlias<"cmplt $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(CMPGT_PPzZZ_S PPR32:$Zd, PPR3bAny:$Pg, ZPR32:$Zn, ZPR32:$Zm), 0>;
|
|
|
|
def : InstAlias<"cmplt $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(CMPGT_PPzZZ_D PPR64:$Zd, PPR3bAny:$Pg, ZPR64:$Zn, ZPR64:$Zm), 0>;

// Floating-point compare aliases with the source operands reversed,
// e.g. facle p0.h, p0/z, z0.h, z1.h => facge p0.h, p0/z, z1.h, z0.h
//      fcmlt p0.h, p0/z, z0.h, z1.h => fcmgt p0.h, p0/z, z1.h, z0.h
def : InstAlias<"facle $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(FACGE_PPzZZ_H PPR16:$Zd, PPR3bAny:$Pg, ZPR16:$Zn, ZPR16:$Zm), 0>;
|
|
|
|
def : InstAlias<"facle $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(FACGE_PPzZZ_S PPR32:$Zd, PPR3bAny:$Pg, ZPR32:$Zn, ZPR32:$Zm), 0>;
|
|
|
|
def : InstAlias<"facle $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(FACGE_PPzZZ_D PPR64:$Zd, PPR3bAny:$Pg, ZPR64:$Zn, ZPR64:$Zm), 0>;
|
|
|
|
|
|
|
|
def : InstAlias<"faclt $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(FACGT_PPzZZ_H PPR16:$Zd, PPR3bAny:$Pg, ZPR16:$Zn, ZPR16:$Zm), 0>;
|
|
|
|
def : InstAlias<"faclt $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(FACGT_PPzZZ_S PPR32:$Zd, PPR3bAny:$Pg, ZPR32:$Zn, ZPR32:$Zm), 0>;
|
|
|
|
def : InstAlias<"faclt $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(FACGT_PPzZZ_D PPR64:$Zd, PPR3bAny:$Pg, ZPR64:$Zn, ZPR64:$Zm), 0>;
|
|
|
|
|
|
|
|
def : InstAlias<"fcmle $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(FCMGE_PPzZZ_H PPR16:$Zd, PPR3bAny:$Pg, ZPR16:$Zn, ZPR16:$Zm), 0>;
|
|
|
|
def : InstAlias<"fcmle $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(FCMGE_PPzZZ_S PPR32:$Zd, PPR3bAny:$Pg, ZPR32:$Zn, ZPR32:$Zm), 0>;
|
|
|
|
def : InstAlias<"fcmle $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(FCMGE_PPzZZ_D PPR64:$Zd, PPR3bAny:$Pg, ZPR64:$Zn, ZPR64:$Zm), 0>;
|
|
|
|
|
|
|
|
def : InstAlias<"fcmlt $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(FCMGT_PPzZZ_H PPR16:$Zd, PPR3bAny:$Pg, ZPR16:$Zn, ZPR16:$Zm), 0>;
|
|
|
|
def : InstAlias<"fcmlt $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(FCMGT_PPzZZ_S PPR32:$Zd, PPR3bAny:$Pg, ZPR32:$Zn, ZPR32:$Zm), 0>;
|
|
|
|
def : InstAlias<"fcmlt $Zd, $Pg/z, $Zm, $Zn",
|
|
|
|
(FCMGT_PPzZZ_D PPR64:$Zd, PPR3bAny:$Pg, ZPR64:$Zn, ZPR64:$Zm), 0>;
}