[RISCV] Teach vsetvli insertion that stores don't use the policy bits in vtype.

This can avoid a vsetvli after a tail undisturbed operation.
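For example, in the bswap_v2i64 test updated below, a tail-undisturbed vmv.s.x needs a tu vtype, and the vse64.v that follows never reads the tail/mask policy bits, so the third instruction here (the kind of line the hunks below delete) did nothing useful:

    vsetvli zero, zero, e64, m1, tu, mu
    vmv.s.x v26, a1
    vsetvli zero, zero, e64, m1, ta, mu
    vse64.v v26, (a0)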

Differential Revision: https://reviews.llvm.org/D109549
Craig Topper 2021-09-09 15:17:51 -07:00
parent 4e7ac6faca
commit 6c7cadb8c1
15 changed files with 33 additions and 67 deletions

View File

@@ -58,12 +58,13 @@ class VSETVLIInfo {
   uint8_t TailAgnostic : 1;
   uint8_t MaskAgnostic : 1;
   uint8_t MaskRegOp : 1;
+  uint8_t StoreOp : 1;
   uint8_t SEWLMULRatioOnly : 1;
 
 public:
   VSETVLIInfo()
       : AVLImm(0), TailAgnostic(false), MaskAgnostic(false), MaskRegOp(false),
-        SEWLMULRatioOnly(false) {}
+        StoreOp(false), SEWLMULRatioOnly(false) {}
 
   static VSETVLIInfo getUnknown() {
     VSETVLIInfo Info;
@@ -118,7 +119,8 @@ public:
     TailAgnostic = RISCVVType::isTailAgnostic(VType);
     MaskAgnostic = RISCVVType::isMaskAgnostic(VType);
   }
-  void setVTYPE(RISCVII::VLMUL L, unsigned S, bool TA, bool MA, bool MRO) {
+  void setVTYPE(RISCVII::VLMUL L, unsigned S, bool TA, bool MA, bool MRO,
+                bool IsStore) {
     assert(isValid() && !isUnknown() &&
            "Can't set VTYPE for uninitialized or unknown");
     VLMul = L;
@@ -126,6 +128,7 @@ public:
     TailAgnostic = TA;
     MaskAgnostic = MA;
     MaskRegOp = MRO;
+    StoreOp = IsStore;
   }
 
   unsigned encodeVTYPE() const {
@@ -198,17 +201,28 @@ public:
       return true;
     }
 
-    // VTypes must match unless the instruction is a mask reg operation, then it
-    // only care about VLMAX.
-    // FIXME: Mask reg operations are probably ok if "this" VLMAX is larger
-    // than "InstrInfo".
-    if (!hasSameVTYPE(InstrInfo) &&
-        !(InstrInfo.MaskRegOp && hasSameVLMAX(InstrInfo) &&
-          TailAgnostic == InstrInfo.TailAgnostic &&
-          MaskAgnostic == InstrInfo.MaskAgnostic))
+    // The AVL must match.
+    if (!hasSameAVL(InstrInfo))
       return false;
 
-    return hasSameAVL(InstrInfo);
+    // Simple case, see if full VTYPE matches.
+    if (hasSameVTYPE(InstrInfo))
+      return true;
+
+    // If this is a mask reg operation, it only cares about VLMAX.
+    // FIXME: Mask reg operations are probably ok if "this" VLMAX is larger
+    // than "InstrInfo".
+    if (InstrInfo.MaskRegOp && hasSameVLMAX(InstrInfo) &&
+        TailAgnostic == InstrInfo.TailAgnostic &&
+        MaskAgnostic == InstrInfo.MaskAgnostic)
+      return true;
+
+    // Store instructions don't use the policy fields.
+    if (InstrInfo.StoreOp && VLMul == InstrInfo.VLMul && SEW == InstrInfo.SEW)
+      return true;
+
+    // Anything else is not compatible.
+    return false;
   }
 
   bool isCompatibleWithLoadStoreEEW(unsigned EEW,
@@ -225,10 +239,9 @@
     if (!hasSameAVL(InstrInfo))
       return false;
 
-    // TODO: This check isn't required for stores. But we should ignore for all
-    // stores not just unit-stride and strided so leaving it for now.
-    if (TailAgnostic != InstrInfo.TailAgnostic ||
-        MaskAgnostic != InstrInfo.MaskAgnostic)
+    // Stores can ignore the tail and mask policies.
+    if (!InstrInfo.StoreOp && (TailAgnostic != InstrInfo.TailAgnostic ||
+                               MaskAgnostic != InstrInfo.MaskAgnostic))
      return false;
 
     return getSEWLMULRatio() == getSEWLMULRatio(EEW, InstrInfo.VLMul);
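The same relaxation appears in both compatibility checks above. Below is a minimal standalone sketch of the rule; VState and isCompatible are hypothetical simplifications, not the LLVM classes, and the mask-register and EEW ratio cases are omitted:

// compat_sketch.cpp - illustrative model only; all names are hypothetical.
#include <iostream>

struct VState {
  unsigned AVL;      // application vector length
  unsigned SEW;      // selected element width, in bits
  unsigned LMUL8;    // register group multiplier times 8, kept integral
  bool TailAgnostic; // ta vs. tu
  bool MaskAgnostic; // ma vs. mu
  bool StoreOp;      // requesting instruction is a store (no explicit defs)
};

// True when the state configured by Cur also satisfies Req, i.e. no new
// vsetvli is needed before the requesting instruction.
bool isCompatible(const VState &Cur, const VState &Req) {
  // The AVL must match.
  if (Cur.AVL != Req.AVL)
    return false;
  // Simple case: the full vtype matches.
  if (Cur.SEW == Req.SEW && Cur.LMUL8 == Req.LMUL8 &&
      Cur.TailAgnostic == Req.TailAgnostic &&
      Cur.MaskAgnostic == Req.MaskAgnostic)
    return true;
  // Stores never read the policy bits, so matching SEW and LMUL is enough.
  if (Req.StoreOp && Cur.SEW == Req.SEW && Cur.LMUL8 == Req.LMUL8)
    return true;
  return false;
}

int main() {
  // vtype left behind by a tail-undisturbed vmv.s.x: e64, m1, tu, mu.
  VState AfterMove{2, 64, 8, false, false, false};
  // What a following vse64.v would request: e64, m1, ta, mu.
  VState StoreReq{2, 64, 8, true, false, true};
  std::cout << (isCompatible(AfterMove, StoreReq) ? "no vsetvli needed\n"
                                                  : "vsetvli needed\n");
}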
@@ -428,6 +441,10 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
   unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
   assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
 
+  // If there are no explicit defs, this is a store instruction which can
+  // ignore the tail and mask policies.
+  bool StoreOp = MI.getNumExplicitDefs() == 0;
+
   if (RISCVII::hasVLOp(TSFlags)) {
     const MachineOperand &VLOp = MI.getOperand(NumOperands - 2);
     if (VLOp.isImm()) {
@@ -443,7 +460,7 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
   } else
     InstrInfo.setAVLReg(RISCV::NoRegister);
   InstrInfo.setVTYPE(VLMul, SEW, /*TailAgnostic*/ TailAgnostic,
-                     /*MaskAgnostic*/ false, MaskRegOp);
+                     /*MaskAgnostic*/ false, MaskRegOp, StoreOp);
 
   return InstrInfo;
 }
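As background for why the policy bits are separable at all: in the vtype CSR layout from the RISC-V V 1.0 spec, vta and vma occupy their own bits, so two configurations that differ only in policy already agree on vlmul and vsew. A sketch of that layout (encodeVType here is a hypothetical helper written for illustration):

#include <cstdint>
#include <cstdio>

// vtype bit layout per the RISC-V V 1.0 spec:
// bits [2:0] vlmul, [5:3] vsew = log2(SEW/8), [6] vta, [7] vma.
uint8_t encodeVType(uint8_t VLMul, unsigned SEW, bool TA, bool MA) {
  uint8_t VSew = 0;
  for (unsigned W = SEW / 8; W > 1; W >>= 1)
    ++VSew; // compute log2(SEW/8)
  return VLMul | (VSew << 3) | (TA << 6) | (MA << 7);
}

int main() {
  // e64, m1, tu, mu vs. e64, m1, ta, mu differ only in bit 6 (vta),
  // a bit no vector store consumes.
  std::printf("tu: 0x%02x ta: 0x%02x\n",
              encodeVType(0, 64, false, false),  // 0x18
              encodeVType(0, 64, true, false));  // 0x58
}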

View File

@@ -681,7 +681,6 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV64-NEXT: or a1, a1, a2
 ; LMULMAX2-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX2-RV64-NEXT: vmv.s.x v26, a1
-; LMULMAX2-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX2-RV64-NEXT: vse64.v v26, (a0)
 ; LMULMAX2-RV64-NEXT: ret
 ;
@@ -813,7 +812,6 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT: or a1, a1, a2
 ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX1-RV64-NEXT: vmv.s.x v26, a1
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX1-RV64-NEXT: vse64.v v26, (a0)
 ; LMULMAX1-RV64-NEXT: ret
   %a = load <2 x i64>, <2 x i64>* %x
@@ -2328,7 +2326,6 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT: or a1, a1, a2
 ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX1-RV64-NEXT: vmv.s.x v27, a1
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX1-RV64-NEXT: vse64.v v27, (a0)
 ; LMULMAX1-RV64-NEXT: vse64.v v26, (a6)
 ; LMULMAX1-RV64-NEXT: ret

View File

@@ -3892,7 +3892,6 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56
 ; LMULMAX2-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX2-RV64-NEXT: vmv.s.x v26, a1
-; LMULMAX2-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX2-RV64-NEXT: vse64.v v26, (a0)
 ; LMULMAX2-RV64-NEXT: ret
 ;
@@ -4127,7 +4126,6 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56
 ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX1-RV64-NEXT: vmv.s.x v26, a1
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX1-RV64-NEXT: vse64.v v26, (a0)
 ; LMULMAX1-RV64-NEXT: ret
   %a = load <2 x i64>, <2 x i64>* %x
@@ -11962,7 +11960,6 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56
 ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX1-RV64-NEXT: vmv.s.x v27, a1
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX1-RV64-NEXT: vse64.v v27, (a0)
 ; LMULMAX1-RV64-NEXT: vse64.v v26, (a6)
 ; LMULMAX1-RV64-NEXT: ret

View File

@@ -2721,7 +2721,6 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56
 ; LMULMAX2-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX2-RV64-NEXT: vmv.s.x v26, a1
-; LMULMAX2-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX2-RV64-NEXT: vse64.v v26, (a0)
 ; LMULMAX2-RV64-NEXT: ret
 ;
@@ -2905,7 +2904,6 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56
 ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX1-RV64-NEXT: vmv.s.x v26, a1
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX1-RV64-NEXT: vse64.v v26, (a0)
 ; LMULMAX1-RV64-NEXT: ret
   %a = load <2 x i64>, <2 x i64>* %x
@@ -8294,7 +8292,6 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56
 ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX1-RV64-NEXT: vmv.s.x v26, a1
-; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX1-RV64-NEXT: vse64.v v26, (a0)
 ; LMULMAX1-RV64-NEXT: vse64.v v27, (a6)
 ; LMULMAX1-RV64-NEXT: ret

View File

@@ -856,7 +856,6 @@ define void @truncstore_v16i16_v16i8(<16 x i16> %x, <16 x i8>* %z) {
 ; LMULMAX1-NEXT: vnsrl.wi v25, v9, 0
 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v26, v25, 8
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, m1, ta, mu
 ; LMULMAX1-NEXT: vse8.v v26, (a0)
 ; LMULMAX1-NEXT: ret
 ;
@@ -1176,7 +1175,6 @@ define void @truncstore_v8i32_v8i8(<8 x i32> %x, <8 x i8>* %z) {
 ; LMULMAX1-NEXT: vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v26, v25, 4
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
 ; LMULMAX1-NEXT: vse8.v v26, (a0)
 ; LMULMAX1-NEXT: ret
 ;
@@ -1206,7 +1204,6 @@ define void @truncstore_v8i32_v8i16(<8 x i32> %x, <8 x i16>* %z) {
 ; LMULMAX1-NEXT: vnsrl.wi v25, v9, 0
 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v26, v25, 4
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX1-NEXT: vse16.v v26, (a0)
 ; LMULMAX1-NEXT: ret
 ;
@@ -1312,7 +1309,6 @@ define void @truncstore_v16i32_v16i8(<16 x i32> %x, <16 x i8>* %z) {
 ; LMULMAX1-NEXT: vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v26, v25, 12
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, m1, ta, mu
 ; LMULMAX1-NEXT: vse8.v v26, (a0)
 ; LMULMAX1-NEXT: ret
 ;
@@ -1352,7 +1348,6 @@ define void @truncstore_v16i32_v16i16(<16 x i32> %x, <16 x i16>* %z) {
 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v26, v25, 4
 ; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX1-NEXT: vse16.v v26, (a1)
 ; LMULMAX1-NEXT: vse16.v v27, (a0)
 ; LMULMAX1-NEXT: ret
@@ -1527,7 +1522,6 @@ define void @truncstore_v4i64_v4i8(<4 x i64> %x, <4 x i8>* %z) {
 ; LMULMAX1-NEXT: vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v26, v25, 2
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
 ; LMULMAX1-NEXT: vse8.v v26, (a0)
 ; LMULMAX1-NEXT: ret
 ;
@@ -1563,7 +1557,6 @@ define void @truncstore_v4i64_v4i16(<4 x i64> %x, <4 x i16>* %z) {
 ; LMULMAX1-NEXT: vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v26, v25, 2
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; LMULMAX1-NEXT: vse16.v v26, (a0)
 ; LMULMAX1-NEXT: ret
 ;
@@ -1593,7 +1586,6 @@ define void @truncstore_v4i64_v4i32(<4 x i64> %x, <4 x i32>* %z) {
 ; LMULMAX1-NEXT: vnsrl.wi v25, v9, 0
 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v26, v25, 2
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; LMULMAX1-NEXT: vse32.v v26, (a0)
 ; LMULMAX1-NEXT: ret
 ;
@@ -1645,7 +1637,6 @@ define void @truncstore_v8i64_v8i8(<8 x i64> %x, <8 x i8>* %z) {
 ; LMULMAX1-NEXT: vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v26, v25, 6
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
 ; LMULMAX1-NEXT: vse8.v v26, (a0)
 ; LMULMAX1-NEXT: ret
 ;
@@ -1693,7 +1684,6 @@ define void @truncstore_v8i64_v8i16(<8 x i64> %x, <8 x i16>* %z) {
 ; LMULMAX1-NEXT: vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v26, v25, 6
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX1-NEXT: vse16.v v26, (a0)
 ; LMULMAX1-NEXT: ret
 ;
@@ -1733,7 +1723,6 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %z) {
 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v26, v25, 2
 ; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; LMULMAX1-NEXT: vse32.v v26, (a1)
 ; LMULMAX1-NEXT: vse32.v v27, (a0)
 ; LMULMAX1-NEXT: ret
@@ -1818,7 +1807,6 @@ define void @truncstore_v16i64_v16i8(<16 x i64> %x, <16 x i8>* %z) {
 ; LMULMAX1-NEXT: vnsrl.wi v26, v26, 0
 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v25, v26, 14
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, m1, ta, mu
 ; LMULMAX1-NEXT: vse8.v v25, (a0)
 ; LMULMAX1-NEXT: ret
 ;
@@ -1842,7 +1830,6 @@ define void @truncstore_v16i64_v16i8(<16 x i64> %x, <16 x i8>* %z) {
 ; LMULMAX4-NEXT: vnsrl.wi v25, v25, 0
 ; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, tu, mu
 ; LMULMAX4-NEXT: vslideup.vi v26, v25, 8
-; LMULMAX4-NEXT: vsetvli zero, zero, e8, m1, ta, mu
 ; LMULMAX4-NEXT: vse8.v v26, (a0)
 ; LMULMAX4-NEXT: ret
   %y = trunc <16 x i64> %x to <16 x i8>
@@ -1905,7 +1892,6 @@ define void @truncstore_v16i64_v16i16(<16 x i64> %x, <16 x i16>* %z) {
 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v26, v25, 6
 ; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX1-NEXT: vse16.v v26, (a1)
 ; LMULMAX1-NEXT: vse16.v v27, (a0)
 ; LMULMAX1-NEXT: ret
@@ -1926,7 +1912,6 @@ define void @truncstore_v16i64_v16i16(<16 x i64> %x, <16 x i16>* %z) {
 ; LMULMAX4-NEXT: vslideup.vi v26, v30, 0
 ; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, tu, mu
 ; LMULMAX4-NEXT: vslideup.vi v26, v28, 8
-; LMULMAX4-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; LMULMAX4-NEXT: vse16.v v26, (a0)
 ; LMULMAX4-NEXT: ret
   %y = trunc <16 x i64> %x to <16 x i16>
@@ -1975,7 +1960,6 @@ define void @truncstore_v16i64_v16i32(<16 x i64> %x, <16 x i32>* %z) {
 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v26, v25, 2
 ; LMULMAX1-NEXT: addi a1, a0, 48
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; LMULMAX1-NEXT: vse32.v v26, (a1)
 ; LMULMAX1-NEXT: addi a1, a0, 32
 ; LMULMAX1-NEXT: vse32.v v29, (a1)
@@ -1995,7 +1979,6 @@ define void @truncstore_v16i64_v16i32(<16 x i64> %x, <16 x i32>* %z) {
 ; LMULMAX4-NEXT: vslideup.vi v8, v12, 0
 ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, tu, mu
 ; LMULMAX4-NEXT: vslideup.vi v8, v28, 8
-; LMULMAX4-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; LMULMAX4-NEXT: vse32.v v8, (a0)
 ; LMULMAX4-NEXT: ret
   %y = trunc <16 x i64> %x to <16 x i32>

View File

@@ -91,7 +91,6 @@ define void @buildvec_dominant0_v2f32(<2 x float>* %x) {
 ; CHECK-NEXT: fmv.w.x ft0, zero
 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
 ; CHECK-NEXT: vfmv.s.f v25, ft0
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vse32.v v25, (a0)
 ; CHECK-NEXT: ret
   store <2 x float> <float 0.0, float 1.0>, <2 x float>* %x

View File

@@ -171,7 +171,6 @@ define void @fpround_v8f32_v8f16(<8 x float>* %x, <8 x half>* %y) {
 ; LMULMAX1-NEXT: vfncvt.f.f.w v27, v26
 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v25, v27, 4
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX1-NEXT: vse16.v v25, (a1)
 ; LMULMAX1-NEXT: ret
   %a = load <8 x float>, <8 x float>* %x
@@ -228,7 +227,6 @@ define void @fpround_v8f64_v8f16(<8 x double>* %x, <8 x half>* %y) {
 ; LMULMAX1-NEXT: vfncvt.f.f.w v25, v26
 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v29, v25, 6
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX1-NEXT: vse16.v v29, (a1)
 ; LMULMAX1-NEXT: ret
   %a = load <8 x double>, <8 x double>* %x

View File

@@ -476,7 +476,6 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX1-NEXT: vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v29, v25, 6
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
 ; LMULMAX1-NEXT: vse8.v v29, (a1)
 ; LMULMAX1-NEXT: ret
   %a = load <8 x double>, <8 x double>* %x
@@ -543,7 +542,6 @@ define void @fp2ui_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX1-NEXT: vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v29, v25, 6
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
 ; LMULMAX1-NEXT: vse8.v v29, (a1)
 ; LMULMAX1-NEXT: ret
   %a = load <8 x double>, <8 x double>* %x

View File

@@ -500,7 +500,6 @@ define void @si2fp_v8i64_v8f16(<8 x i64>* %x, <8 x half>* %y) {
 ; LMULMAX1-NEXT: vfncvt.f.f.w v25, v26
 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v29, v25, 6
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX1-NEXT: vse16.v v29, (a1)
 ; LMULMAX1-NEXT: ret
   %a = load <8 x i64>, <8 x i64>* %x
@@ -557,7 +556,6 @@ define void @ui2fp_v8i64_v8f16(<8 x i64>* %x, <8 x half>* %y) {
 ; LMULMAX1-NEXT: vfncvt.f.f.w v25, v26
 ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v29, v25, 6
-; LMULMAX1-NEXT: vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX1-NEXT: vse16.v v29, (a1)
 ; LMULMAX1-NEXT: ret
   %a = load <8 x i64>, <8 x i64>* %x

View File

@@ -132,7 +132,6 @@ define void @insert_v4i32_v2i32_2(<4 x i32>* %vp, <2 x i32>* %svp) {
 ; CHECK-NEXT: vle32.v v26, (a0)
 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
 ; CHECK-NEXT: vslideup.vi v26, v25, 2
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vse32.v v26, (a0)
 ; CHECK-NEXT: ret
   %sv = load <2 x i32>, <2 x i32>* %svp
@@ -212,7 +211,6 @@ define void @insert_v8i32_v2i32_2(<8 x i32>* %vp, <2 x i32>* %svp) {
 ; LMULMAX1-NEXT: vle32.v v26, (a0)
 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v26, v25, 2
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; LMULMAX1-NEXT: vse32.v v26, (a0)
 ; LMULMAX1-NEXT: ret
   %sv = load <2 x i32>, <2 x i32>* %svp
@@ -231,7 +229,6 @@ define void @insert_v8i32_v2i32_6(<8 x i32>* %vp, <2 x i32>* %svp) {
 ; LMULMAX2-NEXT: vle32.v v28, (a0)
 ; LMULMAX2-NEXT: vsetvli zero, zero, e32, m2, tu, mu
 ; LMULMAX2-NEXT: vslideup.vi v28, v26, 6
-; LMULMAX2-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; LMULMAX2-NEXT: vse32.v v28, (a0)
 ; LMULMAX2-NEXT: ret
 ;
@@ -244,7 +241,6 @@ define void @insert_v8i32_v2i32_6(<8 x i32>* %vp, <2 x i32>* %svp) {
 ; LMULMAX1-NEXT: vle32.v v26, (a0)
 ; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v26, v25, 2
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; LMULMAX1-NEXT: vse32.v v26, (a0)
 ; LMULMAX1-NEXT: ret
   %sv = load <2 x i32>, <2 x i32>* %svp
@@ -307,7 +303,6 @@ define void @insert_v4i16_v2i16_2(<4 x i16>* %vp, <2 x i16>* %svp) {
 ; CHECK-NEXT: vle16.v v26, (a1)
 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu
 ; CHECK-NEXT: vslideup.vi v25, v26, 2
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vse16.v v25, (a0)
 ; CHECK-NEXT: ret
   %v = load <4 x i16>, <4 x i16>* %vp

View File

@@ -16,7 +16,6 @@ define void @insertelt_v4i64(<4 x i64>* %x, i64 %y) {
 ; RV32-NEXT: vslide1up.vx v28, v30, a1
 ; RV32-NEXT: vsetivli zero, 4, e64, m2, tu, mu
 ; RV32-NEXT: vslideup.vi v26, v28, 3
-; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; RV32-NEXT: vse64.v v26, (a0)
 ; RV32-NEXT: ret
 ;
@@ -27,7 +26,6 @@ define void @insertelt_v4i64(<4 x i64>* %x, i64 %y) {
 ; RV64-NEXT: vmv.s.x v28, a1
 ; RV64-NEXT: vsetvli zero, zero, e64, m2, tu, mu
 ; RV64-NEXT: vslideup.vi v26, v28, 3
-; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
 ; RV64-NEXT: vse64.v v26, (a0)
 ; RV64-NEXT: ret
   %a = load <4 x i64>, <4 x i64>* %x
@@ -168,7 +166,6 @@ define void @insertelt_v8i64_0(<8 x i64>* %x) {
 ; CHECK-NEXT: addi a1, zero, -1
 ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu
 ; CHECK-NEXT: vmv.s.x v28, a1
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vse64.v v28, (a0)
 ; CHECK-NEXT: ret
   %a = load <8 x i64>, <8 x i64>* %x
@@ -218,7 +215,6 @@ define void @insertelt_c6_v8i64_0(<8 x i64>* %x) {
 ; CHECK-NEXT: addi a1, zero, 6
 ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu
 ; CHECK-NEXT: vmv.s.x v28, a1
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vse64.v v28, (a0)
 ; CHECK-NEXT: ret
   %a = load <8 x i64>, <8 x i64>* %x

View File

@@ -388,7 +388,6 @@ define void @buildvec_dominant0_v2i32(<2 x i64>* %x) {
 ; RV64-NEXT: addi a1, a1, -910
 ; RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu
 ; RV64-NEXT: vmv.s.x v25, a1
-; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
 ; RV64-NEXT: vse64.v v25, (a0)
 ; RV64-NEXT: ret
   store <2 x i64> <i64 2049638230412172402, i64 -1>, <2 x i64>* %x
@@ -575,7 +574,6 @@ define void @buildvec_vid_step1o2_v4i32(<4 x i32>* %z0, <4 x i32>* %z1, <4 x i32
 ; CHECK-NEXT: vmv.v.i v26, 0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
 ; CHECK-NEXT: vslideup.vi v26, v25, 3
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vse32.v v26, (a6)
 ; CHECK-NEXT: ret
   store <4 x i32> <i32 0, i32 0, i32 1, i32 1>, <4 x i32>* %z0
@@ -614,7 +612,6 @@ define void @buildvec_vid_step1o2_add3_v4i16(<4 x i16>* %z0, <4 x i16>* %z1, <4
 ; CHECK-NEXT: vmv.v.i v26, 3
 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu
 ; CHECK-NEXT: vslideup.vi v26, v25, 3
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vse16.v v26, (a6)
 ; CHECK-NEXT: ret
   store <4 x i16> <i16 3, i16 3, i16 4, i16 4>, <4 x i16>* %z0

View File

@@ -223,7 +223,6 @@ define void @trunc_v8i8_v8i32(<8 x i32>* %x, <8 x i8>* %z) {
 ; LMULMAX1-NEXT: vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT: vslideup.vi v27, v25, 4
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
 ; LMULMAX1-NEXT: vse8.v v27, (a1)
 ; LMULMAX1-NEXT: ret
   %a = load <8 x i32>, <8 x i32>* %x

View File

@@ -48,7 +48,6 @@ define void @interleave256(<256 x i16>* %agg.result, <128 x i16>* %0, <128 x i16
 ; RV64-1024-NEXT: vslideup.vi v0, v25, 3
 ; RV64-1024-NEXT: vsetvli zero, a1, e16, m4, tu, mu
 ; RV64-1024-NEXT: vrgather.vv v12, v28, v8, v0.t
-; RV64-1024-NEXT: vsetvli zero, zero, e16, m4, ta, mu
 ; RV64-1024-NEXT: vse16.v v12, (a0)
 ; RV64-1024-NEXT: ret
 ;
@@ -97,7 +96,6 @@ define void @interleave256(<256 x i16>* %agg.result, <128 x i16>* %0, <128 x i16
 ; RV64-2048-NEXT: vslideup.vi v0, v25, 3
 ; RV64-2048-NEXT: vsetvli zero, a1, e16, m2, tu, mu
 ; RV64-2048-NEXT: vrgather.vv v30, v26, v28, v0.t
-; RV64-2048-NEXT: vsetvli zero, zero, e16, m2, ta, mu
 ; RV64-2048-NEXT: vse16.v v30, (a0)
 ; RV64-2048-NEXT: ret
 entry:
@@ -225,7 +223,6 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ; RV64-1024-NEXT: addi a1, a1, 16
 ; RV64-1024-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload
 ; RV64-1024-NEXT: vrgather.vv v8, v24, v16, v0.t
-; RV64-1024-NEXT: vsetvli zero, zero, e16, m8, ta, mu
 ; RV64-1024-NEXT: vse16.v v8, (a0)
 ; RV64-1024-NEXT: csrr a0, vlenb
 ; RV64-1024-NEXT: addi a1, zero, 40
@@ -287,7 +284,6 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ; RV64-2048-NEXT: vslideup.vi v0, v25, 7
 ; RV64-2048-NEXT: vsetvli zero, a1, e16, m4, tu, mu
 ; RV64-2048-NEXT: vrgather.vv v12, v28, v8, v0.t
-; RV64-2048-NEXT: vsetvli zero, zero, e16, m4, ta, mu
 ; RV64-2048-NEXT: vse16.v v12, (a0)
 ; RV64-2048-NEXT: ret
 entry:

View File

@@ -457,7 +457,6 @@ define void @saxpy_vec(i64 %n, float %a, float* nocapture readonly %x, float* no
 ; CHECK-NEXT: add a2, a2, a1
 ; CHECK-NEXT: vsetvli zero, a4, e32, m8, tu, mu
 ; CHECK-NEXT: vfmacc.vf v16, ft0, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT: vse32.v v16, (a3)
 ; CHECK-NEXT: sub a0, a0, a4
 ; CHECK-NEXT: vsetvli a4, a0, e32, m8, ta, mu