From cc273983f705ebfbbe42d6327a626ce87c494c96 Mon Sep 17 00:00:00 2001
From: Bradley Smith
Date: Tue, 22 Jun 2021 16:34:15 +0100
Subject: [PATCH] [AArch64][SVE] Improve fixed length codegen for common vector shuffle case

Improve codegen when lowering the common vector shuffle case from the
vectorizer (op1[last]:op2[0:last-1]). This patch handles only this
common case, as it is difficult to handle it more generally when using
fixed length vectors, due to being unable to use the SVE ext
instruction.

Differential Revision: https://reviews.llvm.org/D105289
---
 .../Target/AArch64/AArch64ISelLowering.cpp    |  40 +
 llvm/lib/Target/AArch64/AArch64ISelLowering.h |   2 +
 .../sve-fixed-length-vector-shuffle.ll        | 898 ++++++++++++++++++
 3 files changed, 940 insertions(+)
 create mode 100644 llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4315dacd9334..61e278072b9c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1560,6 +1560,7 @@ void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
   setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
   setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
   setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
+  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
   setOperationAction(ISD::VSELECT, VT, Custom);
   setOperationAction(ISD::XOR, VT, Custom);
   setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
@@ -9229,6 +9230,9 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
 
+  if (useSVEForFixedLengthVectorVT(VT))
+    return LowerFixedLengthVECTOR_SHUFFLEToSVE(Op, DAG);
+
   // Convert shuffles that are directly supported on NEON to target-specific
   // DAG nodes, instead of keeping them as shuffles and matching them again
   // during code selection.  This is more efficient and avoids the possibility
@@ -18428,6 +18432,42 @@ AArch64TargetLowering::LowerFixedLengthFPToIntToSVE(SDValue Op,
   }
 }
 
+SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE(
+    SDValue Op, SelectionDAG &DAG) const {
+  EVT VT = Op.getValueType();
+  assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
+
+  auto *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
+  auto ShuffleMask = SVN->getMask();
+
+  SDLoc DL(Op);
+  SDValue Op1 = Op.getOperand(0);
+  SDValue Op2 = Op.getOperand(1);
+
+  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+  Op1 = convertToScalableVector(DAG, ContainerVT, Op1);
+  Op2 = convertToScalableVector(DAG, ContainerVT, Op2);
+
+  bool ReverseEXT = false;
+  unsigned Imm;
+  if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm) &&
+      Imm == VT.getVectorNumElements() - 1) {
+    if (ReverseEXT)
+      std::swap(Op1, Op2);
+
+    EVT ScalarTy = VT.getVectorElementType();
+    if ((ScalarTy == MVT::i8) || (ScalarTy == MVT::i16))
+      ScalarTy = MVT::i32;
+    SDValue Scalar = DAG.getNode(
+        ISD::EXTRACT_VECTOR_ELT, DL, ScalarTy, Op1,
+        DAG.getConstant(VT.getVectorNumElements() - 1, DL, MVT::i64));
+    Op = DAG.getNode(AArch64ISD::INSR, DL, ContainerVT, Op2, Scalar);
+    return convertFromScalableVector(DAG, VT, Op);
+  }
+
+  return SDValue();
+}
+
 SDValue AArch64TargetLowering::getSVESafeBitCast(EVT VT, SDValue Op,
                                                  SelectionDAG &DAG) const {
   SDLoc DL(Op);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 3d74b04cd01d..088a42bc91cc 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -1006,6 +1006,8 @@ private:
   SDValue LowerFixedLengthFPRoundToSVE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFixedLengthIntToFPToSVE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFixedLengthFPToIntToSVE(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerFixedLengthVECTOR_SHUFFLEToSVE(SDValue Op,
+                                              SelectionDAG &DAG) const;
 
   SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                         SmallVectorImpl<SDNode *> &Created) const override;
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll
new file mode 100644
index 000000000000..00fb4a38ad73
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll
@@ -0,0 +1,898 @@
+; RUN: llc -aarch64-sve-vector-bits-min=128 -asm-verbose=0 < %s | FileCheck %s -check-prefix=NO_SVE
+; RUN: llc -aarch64-sve-vector-bits-min=256 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
+; RUN: llc -aarch64-sve-vector-bits-min=384 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK
+; RUN: llc -aarch64-sve-vector-bits-min=512 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=640 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s
-check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024 +; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024 +; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024 +; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024 +; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024 +; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024 +; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048 + +target triple = "aarch64-unknown-linux-gnu" + +; Don't use SVE when its registers are no bigger than NEON. +; NO_SVE-NOT: ptrue + +; Don't use SVE for 64-bit vectors +define <8 x i8> @shuffle_ext_byone_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v8i8 +; CHECK: ext v0.8b, v0.8b, v1.8b, #7 +; CHECK-NEXT: ret + %ret = shufflevector <8 x i8> %op1, <8 x i8> %op2, <8 x i32> + ret <8 x i8> %ret +} + +; Don't use SVE for 128-bit vectors +define <16 x i8> @shuffle_ext_byone_v16i8(<16 x i8> %op1, <16 x i8> %op2) { +; CHECK-LABEL: shuffle_ext_byone_v16i8 +; CHECK: ext v0.16b, v0.16b, v1.16b, #15 +; CHECK-NEXT: ret + %ret = shufflevector <16 x i8> %op1, <16 x i8> %op2, <16 x i32> + ret <16 x i8> %ret +} + +define void @shuffle_ext_byone_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v32i8 +; CHECK: ptrue [[PG:p[0-9]+]].b, vl32 +; CHECK-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0] +; CHECK-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1] +; CHECK-NEXT: mov z[[ELEM:[0-9]+]].b, [[OP1]].b[31] +; CHECK-NEXT: fmov [[TMP:w[0-9]+]], s[[ELEM]] +; CHECK-NEXT: insr [[OP2]].b, [[TMP]] +; CHECK-NEXT: st1b { [[OP2]].b }, [[PG]], [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %ret = shufflevector <32 x i8> %op1, <32 x i8> %op2, <32 x i32> + store <32 x i8> %ret, <32 x i8>* %a + ret void +} + +define void @shuffle_ext_byone_v64i8(<64 x i8>* %a, <64 x i8>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v64i8 +; VBITS_EQ_256: ptrue [[PG:p[0-9]+]].b, vl32 +; VBITS_EQ_256-NEXT: mov w8, #32 +; VBITS_EQ_256-NEXT: ld1b { [[OP1_HI:z[0-9]+]].b }, [[PG]]/z, [x0, x8] +; VBITS_EQ_256-NEXT: ld1b { [[OP2_HI:z[0-9]+]].b }, [[PG]]/z, [x1, x8] +; VBITS_EQ_256-NEXT: ld1b { [[OP2_LO:z[0-9]+]].b }, [[PG]]/z, [x1] +; VBITS_EQ_256-NEXT: mov z[[ELEM1:[0-9]+]].b, [[OP1_HI]].b[31] +; VBITS_EQ_256-NEXT: fmov [[TMP1:w[0-9]+]], s[[ELEM1]] +; VBITS_EQ_256-NEXT: mov z[[ELEM2:[0-9]+]].b, [[OP2_LO]].b[31] +; VBITS_EQ_256-NEXT: insr [[OP2_LO]].b, [[TMP1]] +; VBITS_EQ_256-NEXT: fmov [[TMP2:w[0-9]+]], s[[ELEM2]] +; VBITS_EQ_256-NEXT: insr [[OP2_HI]].b, [[TMP2]] +; VBITS_EQ_256-NEXT: st1b { [[OP2_HI]].b }, [[PG]], [x0, x8] +; VBITS_EQ_256-NEXT: st1b { [[OP2_LO]].b }, [[PG]], [x0] +; VBITS_EQ_256-NEXT: ret + +; VBITS_GE_512: ptrue [[PG:p[0-9]+]].b, vl64 +; VBITS_GE_512-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0] +; VBITS_GE_512-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1] +; VBITS_GE_512-NEXT: mov z[[ELEM:[0-9]+]].b, [[OP1]].b[63] +; VBITS_GE_512-NEXT: fmov [[TMP:w[0-9]+]], s[[ELEM]] +; VBITS_GE_512-NEXT: insr [[OP2]].b, [[TMP]] +; VBITS_GE_512-NEXT: st1b { [[OP2]].b }, [[PG]], [x0] +; 
VBITS_GE_512-NEXT: ret + %op1 = load <64 x i8>, <64 x i8>* %a + %op2 = load <64 x i8>, <64 x i8>* %b + %ret = shufflevector <64 x i8> %op1, <64 x i8> %op2, <64 x i32> + store <64 x i8> %ret, <64 x i8>* %a + ret void +} + +define void @shuffle_ext_byone_v128i8(<128 x i8>* %a, <128 x i8>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v128i8 +; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].b, vl128 +; VBITS_GE_1024-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0] +; VBITS_GE_1024-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1] +; VBITS_GE_1024-NEXT: mov w[[TMP:[0-9]+]], #127 +; VBITS_GE_1024-NEXT: whilels [[WPG:p[0-9]+]].b, xzr, x[[TMP]] +; VBITS_GE_1024-NEXT: lastb [[TMP2:w[0-9]+]], [[WPG]], [[OP1]].b +; VBITS_GE_1024-NEXT: insr [[OP2]].b, [[TMP2]] +; VBITS_GE_1024-NEXT: st1b { [[OP2]].b }, [[PG]], [x0] +; VBITS_GE_1024-NEXT: ret + %op1 = load <128 x i8>, <128 x i8>* %a + %op2 = load <128 x i8>, <128 x i8>* %b + %ret = shufflevector <128 x i8> %op1, <128 x i8> %op2, <128 x i32> + store <128 x i8> %ret, <128 x i8>* %a + ret void +} + +define void @shuffle_ext_byone_v256i8(<256 x i8>* %a, <256 x i8>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v256i8 +; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].b, vl256 +; VBITS_GE_2048-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0] +; VBITS_GE_2048-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1] +; VBITS_GE_2048-NEXT: mov w[[TMP:[0-9]+]], #255 +; VBITS_GE_2048-NEXT: whilels [[WPG:p[0-9]+]].b, xzr, x[[TMP]] +; VBITS_GE_2048-NEXT: lastb [[TMP2:w[0-9]+]], [[WPG]], [[OP1]].b +; VBITS_GE_2048-NEXT: insr [[OP2]].b, [[TMP2]] +; VBITS_GE_2048-NEXT: st1b { [[OP2]].b }, [[PG]], [x0] +; VBITS_GE_2048-NEXT: ret + %op1 = load <256 x i8>, <256 x i8>* %a + %op2 = load <256 x i8>, <256 x i8>* %b + %ret = shufflevector <256 x i8> %op1, <256 x i8> %op2, <256 x i32> + store <256 x i8> %ret, <256 x i8>* %a + ret void +} + +; Don't use SVE for 64-bit vectors +define <4 x i16> @shuffle_ext_byone_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4i16 +; CHECK: ext v0.8b, v0.8b, v1.8b, #6 +; CHECK-NEXT: ret + %ret = shufflevector <4 x i16> %op1, <4 x i16> %op2, <4 x i32> + ret <4 x i16> %ret +} + +; Don't use SVE for 128-bit vectors +define <8 x i16> @shuffle_ext_byone_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v8i16 +; CHECK: ext v0.16b, v0.16b, v1.16b, #14 +; CHECK-NEXT: ret + %ret = shufflevector <8 x i16> %op1, <8 x i16> %op2, <8 x i32> + ret <8 x i16> %ret +} + +define void @shuffle_ext_byone_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v16i16 +; CHECK: ptrue [[PG:p[0-9]+]].h, vl16 +; CHECK-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0] +; CHECK-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1] +; CHECK-NEXT: mov z[[ELEM:[0-9]+]].h, [[OP1]].h[15] +; CHECK-NEXT: fmov [[TMP:w[0-9]+]], s[[ELEM]] +; CHECK-NEXT: insr [[OP2]].h, [[TMP]] +; CHECK-NEXT: st1h { [[OP2]].h }, [[PG]], [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %ret = shufflevector <16 x i16> %op1, <16 x i16> %op2, <16 x i32> + store <16 x i16> %ret, <16 x i16>* %a + ret void +} + +define void @shuffle_ext_byone_v32i16(<32 x i16>* %a, <32 x i16>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v32i16 +; VBITS_EQ_256: add x8, x0, #32 +; VBITS_EQ_256-NEXT: ptrue [[PG:p[0-9]+]].h, vl16 +; VBITS_EQ_256-NEXT: add x9, x1, #32 +; VBITS_EQ_256-NEXT: ld1h { [[OP1_HI:z[0-9]+]].h }, [[PG]]/z, [x8] +; VBITS_EQ_256-NEXT: ld1h { [[OP2_HI:z[0-9]+]].h }, [[PG]]/z, [x9] +; VBITS_EQ_256-NEXT: ld1h { 
[[OP2_LO:z[0-9]+]].h }, [[PG]]/z, [x1] +; VBITS_EQ_256-NEXT: mov z[[ELEM1:[0-9]+]].h, [[OP1_HI]].h[15] +; VBITS_EQ_256-NEXT: fmov [[TMP1:w[0-9]+]], s[[ELEM1]] +; VBITS_EQ_256-NEXT: mov z[[ELEM2:[0-9]+]].h, [[OP2_LO]].h[15] +; VBITS_EQ_256-NEXT: insr [[OP2_LO]].h, [[TMP1]] +; VBITS_EQ_256-NEXT: fmov [[TMP2:w[0-9]+]], s[[ELEM2]] +; VBITS_EQ_256-NEXT: insr [[OP2_HI]].h, [[TMP2]] +; VBITS_EQ_256-NEXT: st1h { [[OP2_HI]].h }, [[PG]], [x8] +; VBITS_EQ_256-NEXT: st1h { [[OP2_LO]].h }, [[PG]], [x0] +; VBITS_EQ_256-NEXT: ret + +; VBITS_GE_512: ptrue [[PG:p[0-9]+]].h, vl32 +; VBITS_GE_512-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0] +; VBITS_GE_512-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1] +; VBITS_GE_512-NEXT: mov z[[ELEM:[0-9]+]].h, [[OP1]].h[31] +; VBITS_GE_512-NEXT: fmov [[TMP:w[0-9]+]], s[[ELEM]] +; VBITS_GE_512-NEXT: insr [[OP2]].h, [[TMP]] +; VBITS_GE_512-NEXT: st1h { [[OP2]].h }, [[PG]], [x0] +; VBITS_GE_512-NEXT: ret + %op1 = load <32 x i16>, <32 x i16>* %a + %op2 = load <32 x i16>, <32 x i16>* %b + %ret = shufflevector <32 x i16> %op1, <32 x i16> %op2, <32 x i32> + store <32 x i16> %ret, <32 x i16>* %a + ret void +} + +define void @shuffle_ext_byone_v64i16(<64 x i16>* %a, <64 x i16>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v64i16 +; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].h, vl64 +; VBITS_GE_1024-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0] +; VBITS_GE_1024-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1] +; VBITS_GE_1024-NEXT: mov w[[TMP:[0-9]+]], #63 +; VBITS_GE_1024-NEXT: whilels [[WPG:p[0-9]+]].h, xzr, x[[TMP]] +; VBITS_GE_1024-NEXT: lastb [[TMP2:w[0-9]+]], [[WPG]], [[OP1]].h +; VBITS_GE_1024-NEXT: insr [[OP2]].h, [[TMP2]] +; VBITS_GE_1024-NEXT: st1h { [[OP2]].h }, [[PG]], [x0] +; VBITS_GE_1024-NEXT: ret + %op1 = load <64 x i16>, <64 x i16>* %a + %op2 = load <64 x i16>, <64 x i16>* %b + %ret = shufflevector <64 x i16> %op1, <64 x i16> %op2, <64 x i32> + store <64 x i16> %ret, <64 x i16>* %a + ret void +} + +define void @shuffle_ext_byone_v128i16(<128 x i16>* %a, <128 x i16>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v128i16 +; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl128 +; VBITS_GE_2048-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0] +; VBITS_GE_2048-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1] +; VBITS_GE_2048-NEXT: mov w[[TMP:[0-9]+]], #127 +; VBITS_GE_2048-NEXT: whilels [[WPG:p[0-9]+]].h, xzr, x[[TMP]] +; VBITS_GE_2048-NEXT: lastb [[TMP2:w[0-9]+]], [[WPG]], [[OP1]].h +; VBITS_GE_2048-NEXT: insr [[OP2]].h, [[TMP2]] +; VBITS_GE_2048-NEXT: st1h { [[OP2]].h }, [[PG]], [x0] +; VBITS_GE_2048-NEXT: ret + %op1 = load <128 x i16>, <128 x i16>* %a + %op2 = load <128 x i16>, <128 x i16>* %b + %ret = shufflevector <128 x i16> %op1, <128 x i16> %op2, <128 x i32> + store <128 x i16> %ret, <128 x i16>* %a + ret void +} + +; Don't use SVE for 64-bit vectors +define <2 x i32> @shuffle_ext_byone_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v2i32 +; CHECK: ext v0.8b, v0.8b, v1.8b, #4 +; CHECK-NEXT: ret + %ret = shufflevector <2 x i32> %op1, <2 x i32> %op2, <2 x i32> + ret <2 x i32> %ret +} + +; Don't use SVE for 128-bit vectors +define <4 x i32> @shuffle_ext_byone_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4i32 +; CHECK: ext v0.16b, v0.16b, v1.16b, #12 +; CHECK-NEXT: ret + %ret = shufflevector <4 x i32> %op1, <4 x i32> %op2, <4 x i32> + ret <4 x i32> %ret +} + +define void @shuffle_ext_byone_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v8i32 +; CHECK: ptrue [[PG:p[0-9]+]].s, vl8 +; 
CHECK-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0] +; CHECK-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1] +; CHECK-NEXT: mov z[[ELEM:[0-9]+]].s, [[OP1]].s[7] +; CHECK-NEXT: fmov [[TMP:w[0-9]+]], s[[ELEM]] +; CHECK-NEXT: insr [[OP2]].s, [[TMP]] +; CHECK-NEXT: st1w { [[OP2]].s }, [[PG]], [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %ret = shufflevector <8 x i32> %op1, <8 x i32> %op2, <8 x i32> + store <8 x i32> %ret, <8 x i32>* %a + ret void +} + +define void @shuffle_ext_byone_v16i32(<16 x i32>* %a, <16 x i32>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v16i32 +; VBITS_EQ_256: add x8, x0, #32 +; VBITS_EQ_256-NEXT: ptrue [[PG:p[0-9]+]].s, vl8 +; VBITS_EQ_256-NEXT: add x9, x1, #32 +; VBITS_EQ_256-NEXT: ld1w { [[OP1_HI:z[0-9]+]].s }, [[PG]]/z, [x8] +; VBITS_EQ_256-NEXT: ld1w { [[OP2_HI:z[0-9]+]].s }, [[PG]]/z, [x9] +; VBITS_EQ_256-NEXT: ld1w { [[OP2_LO:z[0-9]+]].s }, [[PG]]/z, [x1] +; VBITS_EQ_256-NEXT: mov z[[ELEM1:[0-9]+]].s, [[OP1_HI]].s[7] +; VBITS_EQ_256-NEXT: fmov [[TMP1:w[0-9]+]], s[[ELEM1]] +; VBITS_EQ_256-NEXT: mov z[[ELEM2:[0-9]+]].s, [[OP2_LO]].s[7] +; VBITS_EQ_256-NEXT: insr [[OP2_LO]].s, [[TMP1]] +; VBITS_EQ_256-NEXT: fmov [[TMP2:w[0-9]+]], s[[ELEM2]] +; VBITS_EQ_256-NEXT: insr [[OP2_HI]].s, [[TMP2]] +; VBITS_EQ_256-NEXT: st1w { [[OP2_HI]].s }, [[PG]], [x8] +; VBITS_EQ_256-NEXT: st1w { [[OP2_LO]].s }, [[PG]], [x0] +; VBITS_EQ_256-NEXT: ret + +; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl16 +; VBITS_GE_512-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0] +; VBITS_GE_512-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1] +; VBITS_GE_512-NEXT: mov z[[ELEM:[0-9]+]].s, [[OP1]].s[15] +; VBITS_GE_512-NEXT: fmov [[TMP:w[0-9]+]], s[[ELEM]] +; VBITS_GE_512-NEXT: insr [[OP2]].s, [[TMP]] +; VBITS_GE_512-NEXT: st1w { [[OP2]].s }, [[PG]], [x0] +; VBITS_GE_512-NEXT: ret + %op1 = load <16 x i32>, <16 x i32>* %a + %op2 = load <16 x i32>, <16 x i32>* %b + %ret = shufflevector <16 x i32> %op1, <16 x i32> %op2, <16 x i32> + store <16 x i32> %ret, <16 x i32>* %a + ret void +} + +define void @shuffle_ext_byone_v32i32(<32 x i32>* %a, <32 x i32>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v32i32 +; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].s, vl32 +; VBITS_GE_1024-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0] +; VBITS_GE_1024-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1] +; VBITS_GE_1024-NEXT: mov w[[TMP:[0-9]+]], #31 +; VBITS_GE_1024-NEXT: whilels [[WPG:p[0-9]+]].s, xzr, x[[TMP]] +; VBITS_GE_1024-NEXT: lastb [[TMP2:w[0-9]+]], [[WPG]], [[OP1]].s +; VBITS_GE_1024-NEXT: insr [[OP2]].s, [[TMP2]] +; VBITS_GE_1024-NEXT: st1w { [[OP2]].s }, [[PG]], [x0] +; VBITS_GE_1024-NEXT: ret + %op1 = load <32 x i32>, <32 x i32>* %a + %op2 = load <32 x i32>, <32 x i32>* %b + %ret = shufflevector <32 x i32> %op1, <32 x i32> %op2, <32 x i32> + store <32 x i32> %ret, <32 x i32>* %a + ret void +} + +define void @shuffle_ext_byone_v64i32(<64 x i32>* %a, <64 x i32>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v64i32 +; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl64 +; VBITS_GE_2048-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0] +; VBITS_GE_2048-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1] +; VBITS_GE_2048-NEXT: mov w[[TMP:[0-9]+]], #63 +; VBITS_GE_2048-NEXT: whilels [[WPG:p[0-9]+]].s, xzr, x[[TMP]] +; VBITS_GE_2048-NEXT: lastb [[TMP2:w[0-9]+]], [[WPG]], [[OP1]].s +; VBITS_GE_2048-NEXT: insr [[OP2]].s, [[TMP2]] +; VBITS_GE_2048-NEXT: st1w { [[OP2]].s }, [[PG]], [x0] +; VBITS_GE_2048-NEXT: ret + %op1 = load <64 x i32>, <64 x i32>* %a + %op2 = load <64 x i32>, <64 x 
i32>* %b + %ret = shufflevector <64 x i32> %op1, <64 x i32> %op2, <64 x i32> + store <64 x i32> %ret, <64 x i32>* %a + ret void +} + +; Don't use SVE for 128-bit vectors +define <2 x i64> @shuffle_ext_byone_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v2i64 +; CHECK: ext v0.16b, v0.16b, v1.16b, #8 +; CHECK-NEXT: ret + %ret = shufflevector <2 x i64> %op1, <2 x i64> %op2, <2 x i32> + ret <2 x i64> %ret +} + +define void @shuffle_ext_byone_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4i64 +; CHECK: ptrue [[PG:p[0-9]+]].d, vl4 +; CHECK-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0] +; CHECK-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1] +; CHECK-NEXT: mov z[[ELEM:[0-9]+]].d, [[OP1]].d[3] +; CHECK-NEXT: fmov [[TMP:x[0-9]+]], d[[ELEM]] +; CHECK-NEXT: insr [[OP2]].d, [[TMP]] +; CHECK-NEXT: st1d { [[OP2]].d }, [[PG]], [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %ret = shufflevector <4 x i64> %op1, <4 x i64> %op2, <4 x i32> + store <4 x i64> %ret, <4 x i64>* %a + ret void +} + +define void @shuffle_ext_byone_v8i64(<8 x i64>* %a, <8 x i64>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v8i64 +; VBITS_EQ_256: add x8, x0, #32 +; VBITS_EQ_256-NEXT: ptrue [[PG:p[0-9]+]].d, vl4 +; VBITS_EQ_256-NEXT: add x9, x1, #32 +; VBITS_EQ_256-NEXT: ld1d { [[OP1_HI:z[0-9]+]].d }, [[PG]]/z, [x8] +; VBITS_EQ_256-NEXT: ld1d { [[OP2_HI:z[0-9]+]].d }, [[PG]]/z, [x9] +; VBITS_EQ_256-NEXT: ld1d { [[OP2_LO:z[0-9]+]].d }, [[PG]]/z, [x1] +; VBITS_EQ_256-NEXT: mov z[[ELEM1:[0-9]+]].d, [[OP1_HI]].d[3] +; VBITS_EQ_256-NEXT: fmov [[TMP1:x[0-9]+]], d[[ELEM1]] +; VBITS_EQ_256-NEXT: mov z[[ELEM2:[0-9]+]].d, [[OP2_LO]].d[3] +; VBITS_EQ_256-NEXT: insr [[OP2_LO]].d, [[TMP1]] +; VBITS_EQ_256-NEXT: fmov [[TMP2:x[0-9]+]], d[[ELEM2]] +; VBITS_EQ_256-NEXT: insr [[OP2_HI]].d, [[TMP2]] +; VBITS_EQ_256-NEXT: st1d { [[OP2_HI]].d }, [[PG]], [x8] +; VBITS_EQ_256-NEXT: st1d { [[OP2_LO]].d }, [[PG]], [x0] +; VBITS_EQ_256-NEXT: ret + +; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl8 +; VBITS_GE_512-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0] +; VBITS_GE_512-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1] +; VBITS_GE_512-NEXT: mov z[[ELEM:[0-9]+]].d, [[OP1]].d[7] +; VBITS_GE_512-NEXT: fmov [[TMP:x[0-9]+]], d[[ELEM]] +; VBITS_GE_512-NEXT: insr [[OP2]].d, [[TMP]] +; VBITS_GE_512-NEXT: st1d { [[OP2]].d }, [[PG]], [x0] +; VBITS_GE_512-NEXT: ret + %op1 = load <8 x i64>, <8 x i64>* %a + %op2 = load <8 x i64>, <8 x i64>* %b + %ret = shufflevector <8 x i64> %op1, <8 x i64> %op2, <8 x i32> + store <8 x i64> %ret, <8 x i64>* %a + ret void +} + +define void @shuffle_ext_byone_v16i64(<16 x i64>* %a, <16 x i64>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v16i64 +; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl16 +; VBITS_GE_1024-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0] +; VBITS_GE_1024-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1] +; VBITS_GE_1024-NEXT: mov w[[TMP:[0-9]+]], #15 +; VBITS_GE_1024-NEXT: whilels [[WPG:p[0-9]+]].d, xzr, x[[TMP]] +; VBITS_GE_1024-NEXT: lastb [[TMP2:x[0-9]+]], [[WPG]], [[OP1]].d +; VBITS_GE_1024-NEXT: insr [[OP2]].d, [[TMP2]] +; VBITS_GE_1024-NEXT: st1d { [[OP2]].d }, [[PG]], [x0] +; VBITS_GE_1024-NEXT: ret + %op1 = load <16 x i64>, <16 x i64>* %a + %op2 = load <16 x i64>, <16 x i64>* %b + %ret = shufflevector <16 x i64> %op1, <16 x i64> %op2, <16 x i32> + store <16 x i64> %ret, <16 x i64>* %a + ret void +} + +define void @shuffle_ext_byone_v32i64(<32 x i64>* %a, <32 x i64>* %b) #0 { +; CHECK-LABEL: 
shuffle_ext_byone_v32i64 +; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0] +; VBITS_GE_2048-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1] +; VBITS_GE_2048-NEXT: mov w[[TMP:[0-9]+]], #31 +; VBITS_GE_2048-NEXT: whilels [[WPG:p[0-9]+]].d, xzr, x[[TMP]] +; VBITS_GE_2048-NEXT: lastb [[TMP2:x[0-9]+]], [[WPG]], [[OP1]].d +; VBITS_GE_2048-NEXT: insr [[OP2]].d, [[TMP2]] +; VBITS_GE_2048-NEXT: st1d { [[OP2]].d }, [[PG]], [x0] +; VBITS_GE_2048-NEXT: ret + %op1 = load <32 x i64>, <32 x i64>* %a + %op2 = load <32 x i64>, <32 x i64>* %b + %ret = shufflevector <32 x i64> %op1, <32 x i64> %op2, <32 x i32> + store <32 x i64> %ret, <32 x i64>* %a + ret void +} + +; Don't use SVE for 64-bit vectors +define <4 x half> @shuffle_ext_byone_v4f16(<4 x half> %op1, <4 x half> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4f16 +; CHECK: ext v0.8b, v0.8b, v1.8b, #6 +; CHECK-NEXT: ret + %ret = shufflevector <4 x half> %op1, <4 x half> %op2, <4 x i32> + ret <4 x half> %ret +} + +; Don't use SVE for 128-bit vectors +define <8 x half> @shuffle_ext_byone_v8f16(<8 x half> %op1, <8 x half> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v8f16 +; CHECK: ext v0.16b, v0.16b, v1.16b, #14 +; CHECK-NEXT: ret + %ret = shufflevector <8 x half> %op1, <8 x half> %op2, <8 x i32> + ret <8 x half> %ret +} + +define void @shuffle_ext_byone_v16f16(<16 x half>* %a, <16 x half>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v16f16 +; CHECK: ptrue [[PG:p[0-9]+]].h, vl16 +; CHECK-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0] +; CHECK-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1] +; CHECK-NEXT: mov z[[ELEM:[0-9]+]].h, [[OP1]].h[15] +; CHECK-NEXT: insr [[OP2]].h, h[[ELEM]] +; CHECK-NEXT: st1h { [[OP2]].h }, [[PG]], [x0] +; CHECK-NEXT: ret + %op1 = load <16 x half>, <16 x half>* %a + %op2 = load <16 x half>, <16 x half>* %b + %ret = shufflevector <16 x half> %op1, <16 x half> %op2, <16 x i32> + store <16 x half> %ret, <16 x half>* %a + ret void +} + +define void @shuffle_ext_byone_v32f16(<32 x half>* %a, <32 x half>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v32f16 +; VBITS_EQ_256: add x8, x0, #32 +; VBITS_EQ_256-NEXT: ptrue [[PG:p[0-9]+]].h, vl16 +; VBITS_EQ_256-NEXT: add x9, x1, #32 +; VBITS_EQ_256-NEXT: ld1h { [[OP1_HI:z[0-9]+]].h }, [[PG]]/z, [x8] +; VBITS_EQ_256-NEXT: ld1h { [[OP2_HI:z[0-9]+]].h }, [[PG]]/z, [x9] +; VBITS_EQ_256-NEXT: ld1h { [[OP2_LO:z[0-9]+]].h }, [[PG]]/z, [x1] +; VBITS_EQ_256-NEXT: mov z[[ELEM1:[0-9]+]].h, [[OP1_HI]].h[15] +; VBITS_EQ_256-NEXT: mov z[[ELEM2:[0-9]+]].h, [[OP2_LO]].h[15] +; VBITS_EQ_256-NEXT: insr [[OP2_LO]].h, h[[ELEM1]] +; VBITS_EQ_256-NEXT: insr [[OP2_HI]].h, h[[ELEM2]] +; VBITS_EQ_256-NEXT: st1h { [[OP2_HI]].h }, [[PG]], [x8] +; VBITS_EQ_256-NEXT: st1h { [[OP2_LO]].h }, [[PG]], [x0] +; VBITS_EQ_256-NEXT: ret + +; VBITS_GE_512: ptrue [[PG:p[0-9]+]].h, vl32 +; VBITS_GE_512-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0] +; VBITS_GE_512-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1] +; VBITS_GE_512-NEXT: mov z[[ELEM:[0-9]+]].h, [[OP1]].h[31] +; VBITS_GE_512-NEXT: insr [[OP2]].h, h[[ELEM]] +; VBITS_GE_512-NEXT: st1h { [[OP2]].h }, [[PG]], [x0] +; VBITS_GE_512-NEXT: ret + %op1 = load <32 x half>, <32 x half>* %a + %op2 = load <32 x half>, <32 x half>* %b + %ret = shufflevector <32 x half> %op1, <32 x half> %op2, <32 x i32> + store <32 x half> %ret, <32 x half>* %a + ret void +} + +define void @shuffle_ext_byone_v64f16(<64 x half>* %a, <64 x half>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v64f16 +; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].h, 
vl64 +; VBITS_GE_1024-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0] +; VBITS_GE_1024-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1] +; VBITS_GE_1024-NEXT: mov w[[TMP:[0-9]+]], #63 +; VBITS_GE_1024-NEXT: whilels [[WPG:p[0-9]+]].h, xzr, x[[TMP]] +; VBITS_GE_1024-NEXT: lastb [[TMP2:h[0-9]+]], [[WPG]], [[OP1]].h +; VBITS_GE_1024-NEXT: insr [[OP2]].h, [[TMP2]] +; VBITS_GE_1024-NEXT: st1h { [[OP2]].h }, [[PG]], [x0] +; VBITS_GE_1024-NEXT: ret + %op1 = load <64 x half>, <64 x half>* %a + %op2 = load <64 x half>, <64 x half>* %b + %ret = shufflevector <64 x half> %op1, <64 x half> %op2, <64 x i32> + store <64 x half> %ret, <64 x half>* %a + ret void +} + +define void @shuffle_ext_byone_v128f16(<128 x half>* %a, <128 x half>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v128f16 +; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl128 +; VBITS_GE_2048-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0] +; VBITS_GE_2048-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1] +; VBITS_GE_2048-NEXT: mov w[[TMP:[0-9]+]], #127 +; VBITS_GE_2048-NEXT: whilels [[WPG:p[0-9]+]].h, xzr, x[[TMP]] +; VBITS_GE_2048-NEXT: lastb [[TMP2:h[0-9]+]], [[WPG]], [[OP1]].h +; VBITS_GE_2048-NEXT: insr [[OP2]].h, [[TMP2]] +; VBITS_GE_2048-NEXT: st1h { [[OP2]].h }, [[PG]], [x0] +; VBITS_GE_2048-NEXT: ret + %op1 = load <128 x half>, <128 x half>* %a + %op2 = load <128 x half>, <128 x half>* %b + %ret = shufflevector <128 x half> %op1, <128 x half> %op2, <128 x i32> + store <128 x half> %ret, <128 x half>* %a + ret void +} + +; Don't use SVE for 64-bit vectors +define <2 x float> @shuffle_ext_byone_v2f32(<2 x float> %op1, <2 x float> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v2f32 +; CHECK: ext v0.8b, v0.8b, v1.8b, #4 +; CHECK-NEXT: ret + %ret = shufflevector <2 x float> %op1, <2 x float> %op2, <2 x i32> + ret <2 x float> %ret +} + +; Don't use SVE for 128-bit vectors +define <4 x float> @shuffle_ext_byone_v4f32(<4 x float> %op1, <4 x float> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4f32 +; CHECK: ext v0.16b, v0.16b, v1.16b, #12 +; CHECK-NEXT: ret + %ret = shufflevector <4 x float> %op1, <4 x float> %op2, <4 x i32> + ret <4 x float> %ret +} + +define void @shuffle_ext_byone_v8f32(<8 x float>* %a, <8 x float>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v8f32 +; CHECK: ptrue [[PG:p[0-9]+]].s, vl8 +; CHECK-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0] +; CHECK-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1] +; CHECK-NEXT: mov z[[ELEM:[0-9]+]].s, [[OP1]].s[7] +; CHECK-NEXT: insr [[OP2]].s, s[[ELEM]] +; CHECK-NEXT: st1w { [[OP2]].s }, [[PG]], [x0] +; CHECK-NEXT: ret + %op1 = load <8 x float>, <8 x float>* %a + %op2 = load <8 x float>, <8 x float>* %b + %ret = shufflevector <8 x float> %op1, <8 x float> %op2, <8 x i32> + store <8 x float> %ret, <8 x float>* %a + ret void +} + +define void @shuffle_ext_byone_v16f32(<16 x float>* %a, <16 x float>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v16f32 +; VBITS_EQ_256: add x8, x0, #32 +; VBITS_EQ_256-NEXT: ptrue [[PG:p[0-9]+]].s, vl8 +; VBITS_EQ_256-NEXT: add x9, x1, #32 +; VBITS_EQ_256-NEXT: ld1w { [[OP1_HI:z[0-9]+]].s }, [[PG]]/z, [x8] +; VBITS_EQ_256-NEXT: ld1w { [[OP2_HI:z[0-9]+]].s }, [[PG]]/z, [x9] +; VBITS_EQ_256-NEXT: ld1w { [[OP2_LO:z[0-9]+]].s }, [[PG]]/z, [x1] +; VBITS_EQ_256-NEXT: mov z[[ELEM1:[0-9]+]].s, [[OP1_HI]].s[7] +; VBITS_EQ_256-NEXT: mov z[[ELEM2:[0-9]+]].s, [[OP2_LO]].s[7] +; VBITS_EQ_256-NEXT: insr [[OP2_LO]].s, s[[ELEM1]] +; VBITS_EQ_256-NEXT: insr [[OP2_HI]].s, s[[ELEM2]] +; VBITS_EQ_256-NEXT: st1w { [[OP2_HI]].s }, [[PG]], [x8] +; VBITS_EQ_256-NEXT: st1w { [[OP2_LO]].s 
}, [[PG]], [x0] +; VBITS_EQ_256-NEXT: ret + +; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl16 +; VBITS_GE_512-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0] +; VBITS_GE_512-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1] +; VBITS_GE_512-NEXT: mov z[[ELEM:[0-9]+]].s, [[OP1]].s[15] +; VBITS_GE_512-NEXT: insr [[OP2]].s, s[[ELEM]] +; VBITS_GE_512-NEXT: st1w { [[OP2]].s }, [[PG]], [x0] +; VBITS_GE_512-NEXT: ret + %op1 = load <16 x float>, <16 x float>* %a + %op2 = load <16 x float>, <16 x float>* %b + %ret = shufflevector <16 x float> %op1, <16 x float> %op2, <16 x i32> + store <16 x float> %ret, <16 x float>* %a + ret void +} + +define void @shuffle_ext_byone_v32f32(<32 x float>* %a, <32 x float>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v32f32 +; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].s, vl32 +; VBITS_GE_1024-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0] +; VBITS_GE_1024-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1] +; VBITS_GE_1024-NEXT: mov w[[TMP:[0-9]+]], #31 +; VBITS_GE_1024-NEXT: whilels [[WPG:p[0-9]+]].s, xzr, x[[TMP]] +; VBITS_GE_1024-NEXT: lastb [[TMP2:s[0-9]+]], [[WPG]], [[OP1]].s +; VBITS_GE_1024-NEXT: insr [[OP2]].s, [[TMP2]] +; VBITS_GE_1024-NEXT: st1w { [[OP2]].s }, [[PG]], [x0] +; VBITS_GE_1024-NEXT: ret + %op1 = load <32 x float>, <32 x float>* %a + %op2 = load <32 x float>, <32 x float>* %b + %ret = shufflevector <32 x float> %op1, <32 x float> %op2, <32 x i32> + store <32 x float> %ret, <32 x float>* %a + ret void +} + +define void @shuffle_ext_byone_v64f32(<64 x float>* %a, <64 x float>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v64f32 +; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl64 +; VBITS_GE_2048-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0] +; VBITS_GE_2048-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1] +; VBITS_GE_2048-NEXT: mov w[[TMP:[0-9]+]], #63 +; VBITS_GE_2048-NEXT: whilels [[WPG:p[0-9]+]].s, xzr, x[[TMP]] +; VBITS_GE_2048-NEXT: lastb [[TMP2:s[0-9]+]], [[WPG]], [[OP1]].s +; VBITS_GE_2048-NEXT: insr [[OP2]].s, [[TMP2]] +; VBITS_GE_2048-NEXT: st1w { [[OP2]].s }, [[PG]], [x0] +; VBITS_GE_2048-NEXT: ret + %op1 = load <64 x float>, <64 x float>* %a + %op2 = load <64 x float>, <64 x float>* %b + %ret = shufflevector <64 x float> %op1, <64 x float> %op2, <64 x i32> + store <64 x float> %ret, <64 x float>* %a + ret void +} + +; Don't use SVE for 128-bit vectors +define <2 x double> @shuffle_ext_byone_v2f64(<2 x double> %op1, <2 x double> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v2f64 +; CHECK: ext v0.16b, v0.16b, v1.16b, #8 +; CHECK-NEXT: ret + %ret = shufflevector <2 x double> %op1, <2 x double> %op2, <2 x i32> + ret <2 x double> %ret +} + +define void @shuffle_ext_byone_v4f64(<4 x double>* %a, <4 x double>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4f64 +; CHECK: ptrue [[PG:p[0-9]+]].d, vl4 +; CHECK-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0] +; CHECK-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1] +; CHECK-NEXT: mov z[[ELEM:[0-9]+]].d, [[OP1]].d[3] +; CHECK-NEXT: insr [[OP2]].d, d[[ELEM]] +; CHECK-NEXT: st1d { [[OP2]].d }, [[PG]], [x0] +; CHECK-NEXT: ret + %op1 = load <4 x double>, <4 x double>* %a + %op2 = load <4 x double>, <4 x double>* %b + %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32> + store <4 x double> %ret, <4 x double>* %a + ret void +} + +define void @shuffle_ext_byone_v8f64(<8 x double>* %a, <8 x double>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v8f64 +; VBITS_EQ_256: add x8, x0, #32 +; VBITS_EQ_256-NEXT: ptrue [[PG:p[0-9]+]].d, vl4 +; VBITS_EQ_256-NEXT: add x9, x1, #32 +; VBITS_EQ_256-NEXT: ld1d { 
[[OP1_HI:z[0-9]+]].d }, [[PG]]/z, [x8] +; VBITS_EQ_256-NEXT: ld1d { [[OP2_HI:z[0-9]+]].d }, [[PG]]/z, [x9] +; VBITS_EQ_256-NEXT: ld1d { [[OP2_LO:z[0-9]+]].d }, [[PG]]/z, [x1] +; VBITS_EQ_256-NEXT: mov z[[ELEM1:[0-9]+]].d, [[OP1_HI]].d[3] +; VBITS_EQ_256-NEXT: mov z[[ELEM2:[0-9]+]].d, [[OP2_LO]].d[3] +; VBITS_EQ_256-NEXT: insr [[OP2_LO]].d, d[[ELEM1]] +; VBITS_EQ_256-NEXT: insr [[OP2_HI]].d, d[[ELEM2]] +; VBITS_EQ_256-NEXT: st1d { [[OP2_HI]].d }, [[PG]], [x8] +; VBITS_EQ_256-NEXT: st1d { [[OP2_LO]].d }, [[PG]], [x0] +; VBITS_EQ_256-NEXT: ret + +; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl8 +; VBITS_GE_512-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0] +; VBITS_GE_512-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1] +; VBITS_GE_512-NEXT: mov z[[ELEM:[0-9]+]].d, [[OP1]].d[7] +; VBITS_GE_512-NEXT: insr [[OP2]].d, d[[ELEM]] +; VBITS_GE_512-NEXT: st1d { [[OP2]].d }, [[PG]], [x0] +; VBITS_GE_512-NEXT: ret + %op1 = load <8 x double>, <8 x double>* %a + %op2 = load <8 x double>, <8 x double>* %b + %ret = shufflevector <8 x double> %op1, <8 x double> %op2, <8 x i32> + store <8 x double> %ret, <8 x double>* %a + ret void +} + +define void @shuffle_ext_byone_v16f64(<16 x double>* %a, <16 x double>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v16f64 +; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl16 +; VBITS_GE_1024-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0] +; VBITS_GE_1024-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1] +; VBITS_GE_1024-NEXT: mov w[[TMP:[0-9]+]], #15 +; VBITS_GE_1024-NEXT: whilels [[WPG:p[0-9]+]].d, xzr, x[[TMP]] +; VBITS_GE_1024-NEXT: lastb [[TMP2:d[0-9]+]], [[WPG]], [[OP1]].d +; VBITS_GE_1024-NEXT: insr [[OP2]].d, [[TMP2]] +; VBITS_GE_1024-NEXT: st1d { [[OP2]].d }, [[PG]], [x0] +; VBITS_GE_1024-NEXT: ret + %op1 = load <16 x double>, <16 x double>* %a + %op2 = load <16 x double>, <16 x double>* %b + %ret = shufflevector <16 x double> %op1, <16 x double> %op2, <16 x i32> + store <16 x double> %ret, <16 x double>* %a + ret void +} + +define void @shuffle_ext_byone_v32f64(<32 x double>* %a, <32 x double>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v32f64 +; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32 +; VBITS_GE_2048-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0] +; VBITS_GE_2048-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1] +; VBITS_GE_2048-NEXT: mov w[[TMP:[0-9]+]], #31 +; VBITS_GE_2048-NEXT: whilels [[WPG:p[0-9]+]].d, xzr, x[[TMP]] +; VBITS_GE_2048-NEXT: lastb [[TMP2:d[0-9]+]], [[WPG]], [[OP1]].d +; VBITS_GE_2048-NEXT: insr [[OP2]].d, [[TMP2]] +; VBITS_GE_2048-NEXT: st1d { [[OP2]].d }, [[PG]], [x0] +; VBITS_GE_2048-NEXT: ret + %op1 = load <32 x double>, <32 x double>* %a + %op2 = load <32 x double>, <32 x double>* %b + %ret = shufflevector <32 x double> %op1, <32 x double> %op2, <32 x i32> + store <32 x double> %ret, <32 x double>* %a + ret void +} + +define void @shuffle_ext_byone_reverse(<4 x double>* %a, <4 x double>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_reverse +; CHECK: ptrue [[PG:p[0-9]+]].d, vl4 +; CHECK-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0] +; CHECK-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1] +; CHECK-NEXT: mov z[[ELEM:[0-9]+]].d, [[OP2]].d[3] +; CHECK-NEXT: insr [[OP1]].d, d[[ELEM]] +; CHECK-NEXT: st1d { [[OP1]].d }, [[PG]], [x0] +; CHECK-NEXT: ret + %op1 = load <4 x double>, <4 x double>* %a + %op2 = load <4 x double>, <4 x double>* %b + %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32> + store <4 x double> %ret, <4 x double>* %a + ret void +} + +define void @shuffle_ext_invalid(<4 x double>* %a, <4 x double>* %b) #0 { +; 
CHECK-LABEL: shuffle_ext_invalid
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
+; CHECK-NEXT: mov x8, sp
+; CHECK-NEXT: mov z2.d, [[OP1]].d[3]
+; CHECK-NEXT: mov z3.d, [[OP2]].d[1]
+; CHECK-NEXT: mov z0.d, [[OP1]].d[2]
+; CHECK-NEXT: stp d1, d3, [sp, #16]
+; CHECK-NEXT: stp d0, d2, [sp]
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8]
+; CHECK-NEXT: st1d { z0.d }, p0, [x0]
+; CHECK-NEXT: mov sp, x29
+; CHECK-NEXT: ldp x29, x30, [sp], #16
+; CHECK-NEXT: ret
+  %op1 = load <4 x double>, <4 x double>* %a
+  %op2 = load <4 x double>, <4 x double>* %b
+  %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32>
+  store <4 x double> %ret, <4 x double>* %a
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" }
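
For illustration, a minimal IR sketch of the shuffle shape this lowering targets (hypothetical, not taken from the patch or the test file above; it assumes the same "+sve" attribute group #0 and an -aarch64-sve-vector-bits-min of 256 or more). The mask takes the last element of %op1 followed by elements 0..6 of %op2, i.e. the op1[last]:op2[0:last-1] pattern named in the commit message, which is expected to lower to an extract of the last element of %op1 followed by an insr into %op2, as in the tests above.

; Hypothetical example, not part of the patch: by-one shuffle of two
; <8 x i32> values (result = op1[7], op2[0..6]).
define <8 x i32> @shuffle_ext_byone_example(<8 x i32> %op1, <8 x i32> %op2) #0 {
  %ret = shufflevector <8 x i32> %op1, <8 x i32> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
  ret <8 x i32> %ret
}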