[AArch64][SVE] Add ISel pattern to lower DUPLANE128 to LD1RQD

Following on from https://reviews.llvm.org/D128902, lower DUPLANE128 to LD1RQ
instructions for integer load types during instruction selection.
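
For illustration, a minimal IR sketch of the shape that now gets selected to
LD1RQ (the function name @dup_q_segment is hypothetical; the intrinsic
signatures match the tests below):

define <vscale x 2 x i64> @dup_q_segment(<2 x i64>* %p) {
  ; previously: ldr q0, [x0]  +  mov z0.q, q0
  ; now:        ptrue p0.d    +  ld1rqd { z0.d }, p0/z, [x0]
  %v = load <2 x i64>, <2 x i64>* %p
  %ins = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> %v, i64 0)
  %dup = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %ins, i64 0)
  ret <vscale x 2 x i64> %dup
}
declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)
declare <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64>, i64)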

Differential Revision: https://reviews.llvm.org/D130010
Matt Devereau 2022-07-12 15:54:53 +00:00
parent 2feb99b02c
commit e0fbd990c9
4 changed files with 129 additions and 97 deletions

@@ -705,6 +705,9 @@ def concat_vectors : SDNode<"ISD::CONCAT_VECTORS",
def vector_extract_subvec : SDNode<"ISD::EXTRACT_SUBVECTOR",
                                   SDTypeProfile<1, 2, [SDTCisInt<2>, SDTCisVec<1>, SDTCisVec<0>]>,
                                   []>;
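// Type-checked matcher for ISD::INSERT_SUBVECTOR, used by the LD1RQ
// patterns added in this change.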
def vector_insert_subvec : SDNode<"ISD::INSERT_SUBVECTOR",
                                  SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisVec<2>, SDTCisInt<3>]>,
                                  []>;
// This operator does subvector type checking.
def extract_subvector : SDNode<"ISD::EXTRACT_SUBVECTOR", SDTSubVecExtract, []>;

@@ -875,6 +875,16 @@ let Predicates = [HasSVEorSME] in {
defm LD1RQ_W : sve_mem_ldqr_ss<0b10, "ld1rqw", Z_s, ZPR32, GPR64NoXZRshifted32>;
defm LD1RQ_D : sve_mem_ldqr_ss<0b11, "ld1rqd", Z_d, ZPR64, GPR64NoXZRshifted64>;
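// Broadcasting the low 128 bits of an undef vector, when those bits come
// straight from a 16-byte load, is equivalent to a predicated LD1RQ of the
// same address.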
let AddedComplexity = 1 in {
  class LD1RQPat<ValueType vt1, ValueType vt2, SDPatternOperator op, Instruction load_instr, Instruction ptrue> :
    Pat<(vt1 (op (vt1 (vector_insert_subvec (vt1 undef), (vt2 (load GPR64sp:$Xn)), (i64 0))), (i64 0))),
        (load_instr (ptrue 31), GPR64sp:$Xn, 0)>;
}
def : LD1RQPat<nxv16i8, v16i8, AArch64duplane128, LD1RQ_B_IMM, PTRUE_B>;
def : LD1RQPat<nxv8i16, v8i16, AArch64duplane128, LD1RQ_H_IMM, PTRUE_H>;
def : LD1RQPat<nxv4i32, v4i32, AArch64duplane128, LD1RQ_W_IMM, PTRUE_S>;
def : LD1RQPat<nxv2i64, v2i64, AArch64duplane128, LD1RQ_D_IMM, PTRUE_D>;
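// For example, the nxv2i64 instantiation matches the DAG (sketch, for
// illustration only):
//   (nxv2i64 (AArch64duplane128
//              (insert_subvector undef, (v2i64 (load GPR64sp:$Xn)), (i64 0)),
//              (i64 0)))
// and selects (LD1RQ_D_IMM (PTRUE_D 31), GPR64sp:$Xn, 0): an all-lanes-true
// predicated ld1rqd from [$Xn] with immediate offset 0.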
// Contiguous load with reg+reg addressing.
defm LD1B : sve_mem_cld_ss<0b0000, "ld1b", Z_b, ZPR8, GPR64NoXZRshifted8>;
defm LD1B_H : sve_mem_cld_ss<0b0001, "ld1b", Z_h, ZPR16, GPR64NoXZRshifted8>;

@@ -580,103 +580,6 @@ define <vscale x 2 x i64> @dupq_i64_range(<vscale x 2 x i64> %a) {
%out = call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %a, i64 4)
ret <vscale x 2 x i64> %out
}
define dso_local <vscale x 2 x double> @dupq_ld1rqd_f64() {
; CHECK-LABEL: dupq_ld1rqd_f64:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, .LCPI49_0
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI49_0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = tail call fast <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> <double 1.000000e+00, double 2.000000e+00>, i64 0)
%2 = tail call fast <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %1, i64 0)
ret <vscale x 2 x double> %2
}
define dso_local <vscale x 4 x float> @dupq_ld1rqw_f32() {
; CHECK-LABEL: dupq_ld1rqw_f32:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, .LCPI50_0
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI50_0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = tail call fast <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00>, i64 0)
%2 = tail call fast <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %1, i64 0)
ret <vscale x 4 x float> %2
}
define dso_local <vscale x 8 x half> @dupq_ld1rqh_f16() {
; CHECK-LABEL: dupq_ld1rqh_f16:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, .LCPI51_0
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI51_0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = tail call fast <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> <half 0xH3C00, half 0xH4000, half 0xH4200, half 0xH4400, half 0xH4500, half 0xH4600, half 0xH4700, half 0xH4800>, i64 0)
%2 = tail call fast <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %1, i64 0)
ret <vscale x 8 x half> %2
}
define dso_local <vscale x 8 x bfloat> @dupq_ld1rqh_bf16() #0 {
; CHECK-LABEL: dupq_ld1rqh_bf16:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, .LCPI52_0
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI52_0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> <bfloat 1.000e+00, bfloat 2.000e+00, bfloat 3.000e+00, bfloat 4.000e+00, bfloat 5.000e+00, bfloat 6.000e+00, bfloat 7.000e+00, bfloat 8.000e+00>, i64 0)
%2 = call <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> %1, i64 0)
ret <vscale x 8 x bfloat> %2
}
define dso_local <vscale x 2 x i64> @dupq_ld1rqd_i64() {
; CHECK-LABEL: dupq_ld1rqd_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, .LCPI53_0
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI53_0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> <i64 1, i64 2>, i64 0)
%2 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %1, i64 0)
ret <vscale x 2 x i64> %2
}
define dso_local <vscale x 4 x i32> @dupq_ld1rqd_i32() {
; CHECK-LABEL: dupq_ld1rqd_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, .LCPI54_0
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI54_0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i64 0)
%2 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %1, i64 0)
ret <vscale x 4 x i32> %2
}
define dso_local <vscale x 8 x i16> @dupq_ld1rqd_i16() {
; CHECK-LABEL: dupq_ld1rqd_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, .LCPI55_0
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI55_0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i64 0)
%2 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %1, i64 0)
ret <vscale x 8 x i16> %2
}
define dso_local <vscale x 16 x i8> @dupq_ld1rqd_i8() {
; CHECK-LABEL: dupq_ld1rqd_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, .LCPI56_0
; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI56_0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, i64 0)
%2 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %1, i64 0)
ret <vscale x 16 x i8> %2
}
;
; EXT
;

@@ -722,3 +722,119 @@ define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_down(double* %valp)
%shf = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x double> %shf
}
define <vscale x 2 x double> @dupq_ld1rqd_f64(<2 x double>* %a) {
; CHECK-LABEL: dupq_ld1rqd_f64:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = load <2 x double>, <2 x double>* %a
%2 = tail call fast <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> %1, i64 0)
%3 = tail call fast <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %2, i64 0)
ret <vscale x 2 x double> %3
}
define <vscale x 4 x float> @dupq_ld1rqw_f32(<4 x float>* %a) {
; CHECK-LABEL: dupq_ld1rqw_f32:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = load <4 x float>, <4 x float>* %a
%2 = tail call fast <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %1, i64 0)
%3 = tail call fast <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %2, i64 0)
ret <vscale x 4 x float> %3
}
define <vscale x 8 x half> @dupq_ld1rqh_f16(<8 x half>* %a) {
; CHECK-LABEL: dupq_ld1rqh_f16:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = load <8 x half>, <8 x half>* %a
%2 = tail call fast <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> %1, i64 0)
%3 = tail call fast <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %2, i64 0)
ret <vscale x 8 x half> %3
}
define <vscale x 8 x bfloat> @dupq_ld1rqh_bf16(<8 x bfloat>* %a) #0 {
; CHECK-LABEL: dupq_ld1rqh_bf16:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: mov z0.q, q0
; CHECK-NEXT: ret
%1 = load <8 x bfloat>, <8 x bfloat>* %a
%2 = tail call fast <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> %1, i64 0)
%3 = tail call fast <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> %2, i64 0)
ret <vscale x 8 x bfloat> %3
}
define <vscale x 2 x i64> @dupq_ld1rqd_i64(<2 x i64>* %a) #0 {
; CHECK-LABEL: dupq_ld1rqd_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1rqd { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%1 = load <2 x i64>, <2 x i64>* %a
%2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> %1, i64 0)
%3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2, i64 0)
ret <vscale x 2 x i64> %3
}
define <vscale x 4 x i32> @dupq_ld1rqw_i32(<4 x i32>* %a) #0 {
; CHECK-LABEL: dupq_ld1rqw_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ld1rqw { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
%1 = load <4 x i32>, <4 x i32>* %a
%2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> %1, i64 0)
%3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2, i64 0)
ret <vscale x 4 x i32> %3
}
define <vscale x 8 x i16> @dupq_ld1rqh_i16(<8 x i16>* %a) #0 {
; CHECK-LABEL: dupq_ld1rqh_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: ld1rqh { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
%1 = load <8 x i16>, <8 x i16>* %a
%2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> %1, i64 0)
%3 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %2, i64 0)
ret <vscale x 8 x i16> %3
}
define <vscale x 16 x i8> @dupq_ld1rqb_i8(<16 x i8>* %a) #0 {
; CHECK-LABEL: dupq_ld1rqb_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: ld1rqb { z0.b }, p0/z, [x0]
; CHECK-NEXT: ret
%1 = load <16 x i8>, <16 x i8>* %a
%2 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> %1, i64 0)
%3 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %2, i64 0)
ret <vscale x 16 x i8> %3
}
declare <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8>, i64)
declare <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16>, i64)
declare <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32>, i64)
declare <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64>, i64)
declare <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half>, i64)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat>, i64)
declare <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float>, i64)
declare <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double>, i64)
declare <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double>, <2 x double>, i64)
declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float>, <4 x float>, i64)
declare <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half>, <8 x half>, i64)
declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)
declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)
declare <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16>, <8 x i16>, i64)
declare <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8>, <16 x i8>, i64)
declare <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat>, <8 x bfloat>, i64)
attributes #0 = { "target-features"="+sve,+bf16" }