[RISCV] Teach needVSETVLIPHI to handle mask register instructions.

This handles the case where the mask register instruction's input comes
from a PHI of vsetvlis. If the VLMAX is the same as the VLMAX required
by the mask register instruction, we can avoid a vsetvli.

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D113204
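As background for the change below: mask register instructions produce one
bit per element, so they depend only on VLMAX = VLEN * LMUL / SEW, not on
SEW or LMUL individually. A minimal C++ sketch of the quantity being
compared (illustration only, not LLVM code; VLEN, SEW, and LMUL are the
standard RISC-V vector parameters):

// VLMAX = VLEN * LMUL / SEW, with LMUL passed as the fraction
// LMULNum/LMULDenom so fractional LMULs like mf4 stay in integer math.
unsigned vlmax(unsigned VLEN, unsigned SEW,
               unsigned LMULNum, unsigned LMULDenom) {
  return (VLEN * LMULNum) / (LMULDenom * SEW);
}

For any VLEN, vlmax(VLEN, 32, 1, 1) for e32,m1 equals
vlmax(VLEN, 8, 1, 4) for e8,mf4 (both are VLEN/32), which is exactly the
situation in the test updated below.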
commit f59307bfdc (parent 4f11944652)
@@ -900,7 +900,8 @@ bool RISCVInsertVSETVLI::needVSETVLIPHI(const VSETVLIInfo &Require,
     const BlockData &PBBInfo = BlockInfo[PBB->getNumber()];
     // If the exit from the predecessor has the VTYPE we are looking for
     // we might be able to avoid a VSETVLI.
-    if (PBBInfo.Exit.isUnknown() || !PBBInfo.Exit.hasSameVTYPE(Require))
+    if (PBBInfo.Exit.isUnknown() ||
+        !PBBInfo.Exit.hasCompatibleVTYPE(Require, /*Strict*/ false))
       return true;

     // We need the PHI input to be the output of a VSET(I)VLI.
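The shape of that predecessor check, as a hedged standalone sketch
(placeholder types and names, not the real VSETVLIInfo/BlockData
structures): walk every predecessor feeding the PHI and fall back to
"insert a vsetvli" as soon as one of them exits with an unknown or
VLMAX-incompatible VTYPE.

#include <vector>

// Placeholder for the exit VTYPE state of a predecessor block.
struct VTypeState {
  bool Unknown = true;
  unsigned SEW = 0, LMULNum = 0, LMULDenom = 0;
};

// Non-strict compatibility for mask ops: only the SEW/LMUL ratio
// (i.e. VLMAX) has to match, compared cross-multiplied to avoid
// fractional arithmetic.
static bool compatibleForMaskOp(const VTypeState &Exit,
                                const VTypeState &Require) {
  if (Exit.Unknown)
    return false;
  return Exit.SEW * Exit.LMULDenom * Require.LMULNum ==
         Require.SEW * Require.LMULDenom * Exit.LMULNum;
}

// True if a vsetvli is still needed before the mask instruction; any
// incompatible predecessor keeps the conservative answer, mirroring
// the early "return true" in the hunk above.
bool needVSETVLIForPhi(const std::vector<VTypeState> &PredExits,
                       const VTypeState &Require) {
  for (const VTypeState &Exit : PredExits)
    if (!compatibleForMaskOp(Exit, Require))
      return true;
  return false;
}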
@@ -94,28 +94,27 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64)
 declare <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1>, <vscale x 1 x i1>, i64)

-; FIXME: There shouldn't be a vsetvli before the vmor.
+; Make sure we don't insert a vsetvli for the vmor instruction.
 define void @test6(i32* nocapture readonly %A, i32* nocapture %B, i64 %n) {
 ; CHECK-LABEL: test6:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a3, a2, e32, m1, ta, mu
-; CHECK-NEXT:    beqz a3, .LBB5_3
+; CHECK-NEXT:    vsetvli a6, a2, e32, m1, ta, mu
+; CHECK-NEXT:    beqz a6, .LBB5_3
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
 ; CHECK-NEXT:    mv a4, zero
 ; CHECK-NEXT:  .LBB5_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    slli a6, a4, 2
-; CHECK-NEXT:    add a5, a0, a6
-; CHECK-NEXT:    vle32.v v8, (a5)
+; CHECK-NEXT:    slli a5, a4, 2
+; CHECK-NEXT:    add a3, a0, a5
+; CHECK-NEXT:    vle32.v v8, (a3)
 ; CHECK-NEXT:    vmsle.vi v9, v8, -3
 ; CHECK-NEXT:    vmsgt.vi v10, v8, 2
-; CHECK-NEXT:    vsetvli zero, a3, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmor.mm v0, v9, v10
-; CHECK-NEXT:    add a5, a1, a6
-; CHECK-NEXT:    vse32.v v8, (a5), v0.t
-; CHECK-NEXT:    add a4, a4, a3
-; CHECK-NEXT:    vsetvli a3, a2, e32, m1, ta, mu
-; CHECK-NEXT:    bnez a3, .LBB5_2
+; CHECK-NEXT:    add a3, a1, a5
+; CHECK-NEXT:    vse32.v v8, (a3), v0.t
+; CHECK-NEXT:    add a4, a4, a6
+; CHECK-NEXT:    vsetvli a6, a2, e32, m1, ta, mu
+; CHECK-NEXT:    bnez a6, .LBB5_2
 ; CHECK-NEXT:  .LBB5_3: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
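A worked check of why the vsetvli zero, a3, e8, mf4, ta, mu before the
vmor.mm can be dropped: the loop body runs under e32,m1 and the mask op
wants e8,mf4, and both give the same VLMAX. Below is a tiny compilable
check, with VLEN = 128 chosen only to make the numbers concrete (the
equality holds for any VLEN); the remaining a3/a6 register swaps in the
diff appear to be allocation churn from deleting that one instruction.

int main() {
  constexpr unsigned VLEN = 128;                        // assumption for illustration
  constexpr unsigned VLMaxLoop = (VLEN * 1) / (1 * 32); // e32, m1  -> 4 elements
  constexpr unsigned VLMaxMask = (VLEN * 1) / (4 * 8);  // e8,  mf4 -> 4 elements
  static_assert(VLMaxLoop == VLMaxMask,
                "same VLMAX, so the vmor needs no fresh vsetvli");
  return 0;
}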