[RISCV] Optimize select-like vector shuffles

This patch adds a small optimization for vector shuffle lowering,
detecting shuffles which can be re-expressed as vector selects.
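
For intuition (an editorial sketch, not part of the commit): a shuffle qualifies when every result element is either undef or drawn from the same lane of one of the two sources, so the whole operation is a lane-wise select. A minimal standalone version of that lane test, with a hypothetical isSelectLikeMask helper:

#include <cassert>
#include <cstddef>
#include <vector>

// Select-like: each mask element is undef (-1) or indexes its own lane
// in one of the two sources, i.e. Mask[I] % NumElts == I.
static bool isSelectLikeMask(const std::vector<int> &Mask, size_t NumElts) {
  for (size_t I = 0; I < Mask.size(); ++I)
    if (Mask[I] >= 0 && size_t(Mask[I]) % NumElts != I)
      return false;
  return true;
}

int main() {
  assert(isSelectLikeMask({0, 1, 6, 3}, 4));  // lane 2 comes from the RHS
  assert(!isSelectLikeMask({1, 0, 6, 3}, 4)); // lanes 0 and 1 move: not a select
}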

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D99270
Fraser Cormack 2021-03-24 14:54:20 +00:00
parent 06411edb9f
commit 99211352c1
3 changed files with 83 additions and 414 deletions

llvm/lib/Target/RISCV/RISCVISelLowering.cpp

@@ -1371,8 +1371,11 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                                   const RISCVSubtarget &Subtarget) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();
  MVT VT = Op.getSimpleValueType();
  unsigned NumElts = VT.getVectorNumElements();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
  if (SVN->isSplat()) {
@@ -1382,11 +1385,10 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
          DAG, VT, Subtarget);
      V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
      assert(Lane < (int)VT.getVectorNumElements() && "Unexpected lane!");
      assert(Lane < (int)NumElts && "Unexpected lane!");
      SDValue Mask, VL;
      std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
      MVT XLenVT = Subtarget.getXLenVT();
      SDValue Gather =
          DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
                      DAG.getConstant(Lane, DL, XLenVT), Mask, VL);
@@ -1394,6 +1396,29 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
    }
  }
  // Detect shuffles which can be re-expressed as vector selects.
  SmallVector<SDValue> MaskVals;
  // By default we preserve the original operand order, and select LHS as true
  // and RHS as false. However, since RVV vector selects may feature splats but
  // only on the LHS, we may choose to invert our mask and instead select
  // between RHS and LHS.
  bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
  bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
    int MaskIndex = MaskIdx.value();
    bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ SwapOps;
    MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
    return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
  });
  if (IsSelect) {
    assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
    MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
    SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
    return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, SwapOps ? V2 : V1,
                       SwapOps ? V1 : V2);
  }

  return SDValue();
}
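
To make the new logic above easy to poke at outside of SelectionDAG, here is a hedged, self-contained restatement in plain C++ (the function name and the LSB-first bit packing are mine, not LLVM's). Packing the per-lane SelectMaskVal bits this way reproduces the v0 mask constants materialized in the updated tests below (11, 236, 6, 203):

#include <cassert>
#include <cstdint>
#include <vector>

// Mirrors the patch's all_of(enumerate(Mask)) walk: decide whether the
// shuffle is a select, and accumulate one select-mask bit per lane.
// SwapOps models the RVV restriction that only the "true" operand of a
// merge may be a splat/scalar, so a splat RHS flips the mask instead.
static bool shuffleToSelectMask(const std::vector<int> &Mask, unsigned NumElts,
                                bool SwapOps, std::uint64_t &MaskBits) {
  MaskBits = 0;
  bool IsSelect = true;
  for (unsigned I = 0; I < Mask.size(); ++I) {
    int M = Mask[I];
    bool SelectMaskVal = (M < (int)NumElts) ^ SwapOps;
    if (SelectMaskVal)
      MaskBits |= std::uint64_t(1) << I; // lane I reads the "true" operand
    if (M >= 0 && (unsigned)M % NumElts != I)
      IsSelect = false; // element changes lane: not expressible as a select
  }
  return IsSelect;
}

int main() {
  std::uint64_t Bits;
  // shuffle_v4f16 / shuffle_v4i16: <0,1,6,3> -> v0 = 0b1011 = 11.
  assert(shuffleToSelectMask({0, 1, 6, 3}, 4, /*SwapOps=*/false, Bits) &&
         Bits == 11);
  // shuffle_v8f32: <8,9,2,3,12,5,6,7> -> v0 = 0b11101100 = 236.
  assert(shuffleToSelectMask({8, 9, 2, 3, 12, 5, 6, 7}, 8, false, Bits) &&
         Bits == 236);
  // shuffle_v8i32: <0,1,10,3,12,13,6,7> -> v0 = 0b11001011 = 203.
  assert(shuffleToSelectMask({0, 1, 10, 3, 12, 13, 6, 7}, 8, false, Bits) &&
         Bits == 203);
  // shuffle_vf_v4i16: splat on the RHS, so SwapOps inverts the mask:
  // <0,5,6,3> -> v0 = 0b0110 = 6.
  assert(shuffleToSelectMask({0, 5, 6, 3}, 4, /*SwapOps=*/true, Bits) &&
         Bits == 6);
}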

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll

@@ -1,300 +1,59 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefixes=CHECK,RV64
; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefixes=CHECK
define <4 x half> @shuffle_v4f16(<4 x half> %x, <4 x half> %y) {
; CHECK-LABEL: shuffle_v4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 3
; CHECK-NEXT: vfmv.f.s ft0, v25
; CHECK-NEXT: vsetivli a0, 2, e16,m1,ta,mu
; CHECK-NEXT: vfmv.v.f v25, ft0
; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v9, 2
; CHECK-NEXT: vfmv.f.s ft0, v26
; CHECK-NEXT: vsetivli a0, 2, e16,m1,ta,mu
; CHECK-NEXT: vfmv.s.f v25, ft0
; CHECK-NEXT: addi a0, sp, 12
; CHECK-NEXT: vse16.v v25, (a0)
; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 1
; CHECK-NEXT: vfmv.f.s ft0, v25
; CHECK-NEXT: vsetivli a0, 2, e16,m1,ta,mu
; CHECK-NEXT: vfmv.v.f v25, ft0
; CHECK-NEXT: vfmv.f.s ft0, v8
; CHECK-NEXT: vfmv.s.f v25, ft0
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vse16.v v25, (a0)
; CHECK-NEXT: addi a0, zero, 11
; CHECK-NEXT: vsetivli a1, 1, e8,m1,ta,mu
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetivli a0, 4, e16,m1,ta,mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x half> %x, <4 x half> %y, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
ret <4 x half> %s
}
define <8 x float> @shuffle_v8f32(<8 x float> %x, <8 x float> %y) {
; RV32-LABEL: shuffle_v8f32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -64
; RV32-NEXT: .cfi_def_cfa_offset 64
; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 64
; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: andi sp, sp, -32
; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV32-NEXT: vslidedown.vi v26, v8, 7
; RV32-NEXT: vfmv.f.s ft0, v26
; RV32-NEXT: vsetivli a0, 2, e32,m1,ta,mu
; RV32-NEXT: vfmv.v.f v25, ft0
; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV32-NEXT: vslidedown.vi v26, v8, 6
; RV32-NEXT: vfmv.f.s ft0, v26
; RV32-NEXT: vsetivli a0, 2, e32,m1,ta,mu
; RV32-NEXT: vfmv.s.f v25, ft0
; RV32-NEXT: addi a0, sp, 24
; RV32-NEXT: vse32.v v25, (a0)
; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV32-NEXT: vslidedown.vi v26, v8, 5
; RV32-NEXT: vfmv.f.s ft0, v26
; RV32-NEXT: vsetivli a0, 2, e32,m1,ta,mu
; RV32-NEXT: vfmv.v.f v25, ft0
; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV32-NEXT: vslidedown.vi v26, v10, 4
; RV32-NEXT: vfmv.f.s ft0, v26
; RV32-NEXT: vsetivli a0, 2, e32,m1,ta,mu
; RV32-NEXT: vfmv.s.f v25, ft0
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vse32.v v25, (a0)
; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV32-NEXT: vslidedown.vi v26, v8, 3
; RV32-NEXT: vfmv.f.s ft0, v26
; RV32-NEXT: vsetivli a0, 2, e32,m1,ta,mu
; RV32-NEXT: vfmv.v.f v25, ft0
; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV32-NEXT: vslidedown.vi v26, v8, 2
; RV32-NEXT: vfmv.f.s ft0, v26
; RV32-NEXT: vsetivli a0, 2, e32,m1,ta,mu
; RV32-NEXT: vfmv.s.f v25, ft0
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vse32.v v25, (a0)
; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV32-NEXT: vslidedown.vi v26, v10, 1
; RV32-NEXT: vfmv.f.s ft0, v26
; RV32-NEXT: vsetivli a0, 2, e32,m1,ta,mu
; RV32-NEXT: vfmv.v.f v25, ft0
; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; RV32-NEXT: vfmv.f.s ft0, v10
; RV32-NEXT: vsetivli a0, 2, e32,m1,ta,mu
; RV32-NEXT: vfmv.s.f v25, ft0
; RV32-NEXT: vse32.v v25, (sp)
; RV32-NEXT: vsetivli a0, 8, e32,m2,ta,mu
; RV32-NEXT: vle32.v v8, (sp)
; RV32-NEXT: addi sp, s0, -64
; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 64
; RV32-NEXT: ret
;
; RV64-LABEL: shuffle_v8f32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -64
; RV64-NEXT: .cfi_def_cfa_offset 64
; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 64
; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: andi sp, sp, -32
; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV64-NEXT: vslidedown.vi v26, v8, 7
; RV64-NEXT: vfmv.f.s ft0, v26
; RV64-NEXT: vsetivli a0, 2, e32,m1,ta,mu
; RV64-NEXT: vfmv.v.f v25, ft0
; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV64-NEXT: vslidedown.vi v26, v8, 6
; RV64-NEXT: vfmv.f.s ft0, v26
; RV64-NEXT: vsetivli a0, 2, e32,m1,ta,mu
; RV64-NEXT: vfmv.s.f v25, ft0
; RV64-NEXT: addi a0, sp, 24
; RV64-NEXT: vse32.v v25, (a0)
; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV64-NEXT: vslidedown.vi v26, v8, 5
; RV64-NEXT: vfmv.f.s ft0, v26
; RV64-NEXT: vsetivli a0, 2, e32,m1,ta,mu
; RV64-NEXT: vfmv.v.f v25, ft0
; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV64-NEXT: vslidedown.vi v26, v10, 4
; RV64-NEXT: vfmv.f.s ft0, v26
; RV64-NEXT: vsetivli a0, 2, e32,m1,ta,mu
; RV64-NEXT: vfmv.s.f v25, ft0
; RV64-NEXT: addi a0, sp, 16
; RV64-NEXT: vse32.v v25, (a0)
; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV64-NEXT: vslidedown.vi v26, v8, 3
; RV64-NEXT: vfmv.f.s ft0, v26
; RV64-NEXT: vsetivli a0, 2, e32,m1,ta,mu
; RV64-NEXT: vfmv.v.f v25, ft0
; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV64-NEXT: vslidedown.vi v26, v8, 2
; RV64-NEXT: vfmv.f.s ft0, v26
; RV64-NEXT: vsetivli a0, 2, e32,m1,ta,mu
; RV64-NEXT: vfmv.s.f v25, ft0
; RV64-NEXT: addi a0, sp, 8
; RV64-NEXT: vse32.v v25, (a0)
; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV64-NEXT: vslidedown.vi v26, v10, 1
; RV64-NEXT: vfmv.f.s ft0, v26
; RV64-NEXT: vsetivli a0, 2, e32,m1,ta,mu
; RV64-NEXT: vfmv.v.f v25, ft0
; RV64-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; RV64-NEXT: vfmv.f.s ft0, v10
; RV64-NEXT: vsetivli a0, 2, e32,m1,ta,mu
; RV64-NEXT: vfmv.s.f v25, ft0
; RV64-NEXT: vse32.v v25, (sp)
; RV64-NEXT: vsetivli a0, 8, e32,m2,ta,mu
; RV64-NEXT: vle32.v v8, (sp)
; RV64-NEXT: addi sp, s0, -64
; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
; CHECK-LABEL: shuffle_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 236
; CHECK-NEXT: vsetivli a1, 1, e8,m1,ta,mu
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetivli a0, 8, e32,m2,ta,mu
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: ret
%s = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 8, i32 9, i32 2, i32 3, i32 12, i32 5, i32 6, i32 7>
ret <8 x float> %s
}
define <4 x double> @shuffle_fv_v4i16(<4 x double> %x) {
; RV32-LABEL: shuffle_fv_v4i16:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -64
; RV32-NEXT: .cfi_def_cfa_offset 64
; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 64
; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: andi sp, sp, -32
; RV32-NEXT: lui a0, %hi(.LCPI2_0)
; RV32-NEXT: fld ft0, %lo(.LCPI2_0)(a0)
; RV32-NEXT: fsd ft0, 24(sp)
; RV32-NEXT: fsd ft0, 0(sp)
; RV32-NEXT: vsetivli a0, 1, e64,m2,ta,mu
; RV32-NEXT: vslidedown.vi v26, v8, 2
; RV32-NEXT: vfmv.f.s ft0, v26
; RV32-NEXT: fsd ft0, 16(sp)
; RV32-NEXT: vslidedown.vi v26, v8, 1
; RV32-NEXT: vfmv.f.s ft0, v26
; RV32-NEXT: fsd ft0, 8(sp)
; RV32-NEXT: vsetivli a0, 4, e64,m2,ta,mu
; RV32-NEXT: vle64.v v8, (sp)
; RV32-NEXT: addi sp, s0, -64
; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 64
; RV32-NEXT: ret
;
; RV64-LABEL: shuffle_fv_v4i16:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -64
; RV64-NEXT: .cfi_def_cfa_offset 64
; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 64
; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: andi sp, sp, -32
; RV64-NEXT: lui a0, %hi(.LCPI2_0)
; RV64-NEXT: fld ft0, %lo(.LCPI2_0)(a0)
; RV64-NEXT: fsd ft0, 24(sp)
; RV64-NEXT: fsd ft0, 0(sp)
; RV64-NEXT: vsetivli a0, 1, e64,m2,ta,mu
; RV64-NEXT: vslidedown.vi v26, v8, 2
; RV64-NEXT: vfmv.f.s ft0, v26
; RV64-NEXT: fsd ft0, 16(sp)
; RV64-NEXT: vslidedown.vi v26, v8, 1
; RV64-NEXT: vfmv.f.s ft0, v26
; RV64-NEXT: fsd ft0, 8(sp)
; RV64-NEXT: vsetivli a0, 4, e64,m2,ta,mu
; RV64-NEXT: vle64.v v8, (sp)
; RV64-NEXT: addi sp, s0, -64
; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
; CHECK-LABEL: shuffle_fv_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 9
; CHECK-NEXT: lui a1, %hi(.LCPI2_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI2_0)(a1)
; CHECK-NEXT: vsetivli a1, 1, e8,m1,ta,mu
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetivli a0, 4, e64,m2,ta,mu
; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x double> <double 2.0, double 2.0, double 2.0, double 2.0>, <4 x double> %x, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
ret <4 x double> %s
}
define <4 x double> @shuffle_vf_v4i16(<4 x double> %x) {
; RV32-LABEL: shuffle_vf_v4i16:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -64
; RV32-NEXT: .cfi_def_cfa_offset 64
; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 64
; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: andi sp, sp, -32
; RV32-NEXT: lui a0, %hi(.LCPI3_0)
; RV32-NEXT: fld ft0, %lo(.LCPI3_0)(a0)
; RV32-NEXT: fsd ft0, 16(sp)
; RV32-NEXT: fsd ft0, 8(sp)
; RV32-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; RV32-NEXT: vfmv.f.s ft0, v8
; RV32-NEXT: fsd ft0, 0(sp)
; RV32-NEXT: vsetivli a0, 1, e64,m2,ta,mu
; RV32-NEXT: vslidedown.vi v26, v8, 3
; RV32-NEXT: vfmv.f.s ft0, v26
; RV32-NEXT: fsd ft0, 24(sp)
; RV32-NEXT: vsetivli a0, 4, e64,m2,ta,mu
; RV32-NEXT: vle64.v v8, (sp)
; RV32-NEXT: addi sp, s0, -64
; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 64
; RV32-NEXT: ret
;
; RV64-LABEL: shuffle_vf_v4i16:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -64
; RV64-NEXT: .cfi_def_cfa_offset 64
; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 64
; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: andi sp, sp, -32
; RV64-NEXT: lui a0, %hi(.LCPI3_0)
; RV64-NEXT: fld ft0, %lo(.LCPI3_0)(a0)
; RV64-NEXT: fsd ft0, 16(sp)
; RV64-NEXT: fsd ft0, 8(sp)
; RV64-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; RV64-NEXT: vfmv.f.s ft0, v8
; RV64-NEXT: fsd ft0, 0(sp)
; RV64-NEXT: vsetivli a0, 1, e64,m2,ta,mu
; RV64-NEXT: vslidedown.vi v26, v8, 3
; RV64-NEXT: vfmv.f.s ft0, v26
; RV64-NEXT: fsd ft0, 24(sp)
; RV64-NEXT: vsetivli a0, 4, e64,m2,ta,mu
; RV64-NEXT: vle64.v v8, (sp)
; RV64-NEXT: addi sp, s0, -64
; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
; CHECK-LABEL: shuffle_vf_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 6
; CHECK-NEXT: lui a1, %hi(.LCPI3_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI3_0)(a1)
; CHECK-NEXT: vsetivli a1, 1, e8,m1,ta,mu
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetivli a0, 4, e64,m2,ta,mu
; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x double> %x, <4 x double> <double 2.0, double 2.0, double 2.0, double 2.0>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
ret <4 x double> %s
}
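
A side note on the shuffle_fv/shuffle_vf pair above: both use the index mask <0, 5, 6, 3>, but in the vf case the splat is the shuffle's second operand, so the lowering sets SwapOps and inverts the select mask, keeping the splat as the merge's scalar ("true") operand. Over four lanes the two masks are therefore bitwise complements, matching the constants 9 and 6 in the checks. A one-line arithmetic check:

// The two 4-bit select masks complement each other: 0b1001 (9) picks the
// splat lanes in shuffle_fv_v4i16; 0b0110 (6) picks them in
// shuffle_vf_v4i16 after the operand swap.
static_assert((0b1001 ^ 0b1111) == 0b0110, "swapped select mask is the complement");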

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll

@@ -1,122 +1,29 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefixes=CHECK,RV64
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefix=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefix=CHECK
define <4 x i16> @shuffle_v4i16(<4 x i16> %x, <4 x i16> %y) {
; CHECK-LABEL: shuffle_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: sh a0, 8(sp)
; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 3
; CHECK-NEXT: vmv.x.s a0, v25
; CHECK-NEXT: sh a0, 14(sp)
; CHECK-NEXT: vslidedown.vi v25, v9, 2
; CHECK-NEXT: vmv.x.s a0, v25
; CHECK-NEXT: sh a0, 12(sp)
; CHECK-NEXT: vslidedown.vi v25, v8, 1
; CHECK-NEXT: vmv.x.s a0, v25
; CHECK-NEXT: sh a0, 10(sp)
; CHECK-NEXT: addi a0, zero, 11
; CHECK-NEXT: vsetivli a1, 1, e8,m1,ta,mu
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetivli a0, 4, e16,m1,ta,mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x i16> %x, <4 x i16> %y, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
ret <4 x i16> %s
}
define <8 x i32> @shuffle_v8i32(<8 x i32> %x, <8 x i32> %y) {
; RV32-LABEL: shuffle_v8i32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -64
; RV32-NEXT: .cfi_def_cfa_offset 64
; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 64
; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: andi sp, sp, -32
; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: sw a0, 0(sp)
; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV32-NEXT: vslidedown.vi v26, v8, 7
; RV32-NEXT: vmv.x.s a0, v26
; RV32-NEXT: sw a0, 28(sp)
; RV32-NEXT: vslidedown.vi v26, v8, 6
; RV32-NEXT: vmv.x.s a0, v26
; RV32-NEXT: sw a0, 24(sp)
; RV32-NEXT: vslidedown.vi v26, v10, 5
; RV32-NEXT: vmv.x.s a0, v26
; RV32-NEXT: sw a0, 20(sp)
; RV32-NEXT: vslidedown.vi v26, v10, 4
; RV32-NEXT: vmv.x.s a0, v26
; RV32-NEXT: sw a0, 16(sp)
; RV32-NEXT: vslidedown.vi v26, v8, 3
; RV32-NEXT: vmv.x.s a0, v26
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: vslidedown.vi v26, v10, 2
; RV32-NEXT: vmv.x.s a0, v26
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vslidedown.vi v26, v8, 1
; RV32-NEXT: vmv.x.s a0, v26
; RV32-NEXT: sw a0, 4(sp)
; RV32-NEXT: vsetivli a0, 8, e32,m2,ta,mu
; RV32-NEXT: vle32.v v8, (sp)
; RV32-NEXT: addi sp, s0, -64
; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 64
; RV32-NEXT: ret
;
; RV64-LABEL: shuffle_v8i32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -64
; RV64-NEXT: .cfi_def_cfa_offset 64
; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 64
; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: andi sp, sp, -32
; RV64-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: sw a0, 0(sp)
; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV64-NEXT: vslidedown.vi v26, v8, 7
; RV64-NEXT: vmv.x.s a0, v26
; RV64-NEXT: sw a0, 28(sp)
; RV64-NEXT: vslidedown.vi v26, v8, 6
; RV64-NEXT: vmv.x.s a0, v26
; RV64-NEXT: sw a0, 24(sp)
; RV64-NEXT: vslidedown.vi v26, v10, 5
; RV64-NEXT: vmv.x.s a0, v26
; RV64-NEXT: sw a0, 20(sp)
; RV64-NEXT: vslidedown.vi v26, v10, 4
; RV64-NEXT: vmv.x.s a0, v26
; RV64-NEXT: sw a0, 16(sp)
; RV64-NEXT: vslidedown.vi v26, v8, 3
; RV64-NEXT: vmv.x.s a0, v26
; RV64-NEXT: sw a0, 12(sp)
; RV64-NEXT: vslidedown.vi v26, v10, 2
; RV64-NEXT: vmv.x.s a0, v26
; RV64-NEXT: sw a0, 8(sp)
; RV64-NEXT: vslidedown.vi v26, v8, 1
; RV64-NEXT: vmv.x.s a0, v26
; RV64-NEXT: sw a0, 4(sp)
; RV64-NEXT: vsetivli a0, 8, e32,m2,ta,mu
; RV64-NEXT: vle32.v v8, (sp)
; RV64-NEXT: addi sp, s0, -64
; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
; CHECK-LABEL: shuffle_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 203
; CHECK-NEXT: vsetivli a1, 1, e8,m1,ta,mu
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetivli a0, 8, e32,m2,ta,mu
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: ret
%s = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 0, i32 1, i32 10, i32 3, i32 12, i32 13, i32 6, i32 7>
ret <8 x i32> %s
}
@@ -124,22 +31,11 @@ define <8 x i32> @shuffle_v8i32(<8 x i32> %x, <8 x i32> %y) {
define <4 x i16> @shuffle_xv_v4i16(<4 x i16> %x) {
; CHECK-LABEL: shuffle_xv_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: addi a0, zero, 5
; CHECK-NEXT: sh a0, 14(sp)
; CHECK-NEXT: sh a0, 8(sp)
; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vmv.x.s a0, v25
; CHECK-NEXT: sh a0, 12(sp)
; CHECK-NEXT: vslidedown.vi v25, v8, 1
; CHECK-NEXT: vmv.x.s a0, v25
; CHECK-NEXT: sh a0, 10(sp)
; CHECK-NEXT: addi a0, zero, 9
; CHECK-NEXT: vsetivli a1, 1, e8,m1,ta,mu
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetivli a0, 4, e16,m1,ta,mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: vmerge.vim v8, v8, 5, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x i16> <i16 5, i16 5, i16 5, i16 5>, <4 x i16> %x, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
ret <4 x i16> %s
@@ -148,22 +44,11 @@ define <4 x i16> @shuffle_xv_v4i16(<4 x i16> %x) {
define <4 x i16> @shuffle_vx_v4i16(<4 x i16> %x) {
; CHECK-LABEL: shuffle_vx_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: addi a0, zero, 5
; CHECK-NEXT: sh a0, 12(sp)
; CHECK-NEXT: sh a0, 10(sp)
; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: sh a0, 8(sp)
; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 3
; CHECK-NEXT: vmv.x.s a0, v25
; CHECK-NEXT: sh a0, 14(sp)
; CHECK-NEXT: addi a0, zero, 6
; CHECK-NEXT: vsetivli a1, 1, e8,m1,ta,mu
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetivli a0, 4, e16,m1,ta,mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: vmerge.vim v8, v8, 5, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x i16> %x, <4 x i16> <i16 5, i16 5, i16 5, i16 5>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
ret <4 x i16> %s
}