[RISCV] Fix alias printing for vmnot.m

By clearing the HasDummyMask flag from mask register binary operations
and mask load/store.

HasDummyMask was causing an extra operand to get appended when
converting from MachineInstr to MCInst. This extra operand doesn't
appear in the assembly string so was mostly ignored, but it prevented
the alias instruction printing from working correctly.

Reviewed By: arcbbb

Differential Revision: https://reviews.llvm.org/D124424
This commit is contained in:
Craig Topper 2022-04-28 08:20:42 -07:00
parent 2883de0514
commit 8631a5e712
14 changed files with 226 additions and 224 deletions

View File

@@ -641,7 +641,7 @@ class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins> :
let VLMul = m.value; let VLMul = m.value;
} }
class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit isFF> : class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit isFF, bit DummyMask = 1> :
Pseudo<(outs RetClass:$rd), Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>, (ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo, RISCVVPseudo,
@@ -651,7 +651,7 @@ class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit isFF> :
let hasSideEffects = 0; let hasSideEffects = 0;
let HasVLOp = 1; let HasVLOp = 1;
let HasSEWOp = 1; let HasSEWOp = 1;
let HasDummyMask = 1; let HasDummyMask = DummyMask;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst); let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
} }
@@ -794,7 +794,7 @@ class VPseudoILoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst); let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
} }
class VPseudoUSStoreNoMask<VReg StClass, int EEW>: class VPseudoUSStoreNoMask<VReg StClass, int EEW, bit DummyMask = 1>:
Pseudo<(outs), Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>, (ins StClass:$rd, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo, RISCVVPseudo,
@@ -804,7 +804,7 @@ class VPseudoUSStoreNoMask<VReg StClass, int EEW>:
let hasSideEffects = 0; let hasSideEffects = 0;
let HasVLOp = 1; let HasVLOp = 1;
let HasSEWOp = 1; let HasSEWOp = 1;
let HasDummyMask = 1; let HasDummyMask = DummyMask;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst); let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
} }
@@ -1035,7 +1035,8 @@ class VPseudoUnaryAnyMask<VReg RetClass,
class VPseudoBinaryNoMask<VReg RetClass, class VPseudoBinaryNoMask<VReg RetClass,
VReg Op1Class, VReg Op1Class,
DAGOperand Op2Class, DAGOperand Op2Class,
string Constraint> : string Constraint,
int DummyMask = 1> :
Pseudo<(outs RetClass:$rd), Pseudo<(outs RetClass:$rd),
(ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>, (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo { RISCVVPseudo {
@@ -1045,7 +1046,7 @@ class VPseudoBinaryNoMask<VReg RetClass,
let Constraints = Constraint; let Constraints = Constraint;
let HasVLOp = 1; let HasVLOp = 1;
let HasSEWOp = 1; let HasSEWOp = 1;
let HasDummyMask = 1; let HasDummyMask = DummyMask;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst); let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
} }
@@ -1544,7 +1545,8 @@ multiclass VPseudoFFLoad {
multiclass VPseudoLoadMask { multiclass VPseudoLoadMask {
foreach mti = AllMasks in { foreach mti = AllMasks in {
let VLMul = mti.LMul.value in { let VLMul = mti.LMul.value in {
def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*isFF*/0>; def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*isFF*/0,
/*DummyMask*/0>;
} }
} }
} }
@@ -1616,7 +1618,7 @@ multiclass VPseudoUSStore {
multiclass VPseudoStoreMask { multiclass VPseudoStoreMask {
foreach mti = AllMasks in { foreach mti = AllMasks in {
let VLMul = mti.LMul.value in { let VLMul = mti.LMul.value in {
def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, /*EEW*/1>; def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, /*EEW*/1, /*DummyMask*/0>;
} }
} }
} }
@@ -1866,7 +1868,7 @@ multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
multiclass VPseudoVALU_MM { multiclass VPseudoVALU_MM {
foreach m = MxList in foreach m = MxList in
let VLMul = m.value in { let VLMul = m.value in {
def "_MM_" # m.MX : VPseudoBinaryNoMask<VR, VR, VR, "">, def "_MM_" # m.MX : VPseudoBinaryNoMask<VR, VR, VR, "", /*DummyMask*/0>,
Sched<[WriteVMALUV, ReadVMALUV, ReadVMALUV]>; Sched<[WriteVMALUV, ReadVMALUV, ReadVMALUV]>;
} }
} }

View File

@@ -256,7 +256,7 @@ define void @fcmp_ule_vv_v32f16(<32 x half>* %x, <32 x half>* %y, <32 x i1>* %z)
; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vle16.v v12, (a1) ; CHECK-NEXT: vle16.v v12, (a1)
; CHECK-NEXT: vmflt.vv v16, v12, v8 ; CHECK-NEXT: vmflt.vv v16, v12, v8
; CHECK-NEXT: vmnand.mm v8, v16, v16 ; CHECK-NEXT: vmnot.m v8, v16
; CHECK-NEXT: vsm.v v8, (a2) ; CHECK-NEXT: vsm.v v8, (a2)
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%a = load <32 x half>, <32 x half>* %x %a = load <32 x half>, <32 x half>* %x
@@ -290,7 +290,7 @@ define void @fcmp_uge_vv_v16f32(<16 x float>* %x, <16 x float>* %y, <16 x i1>* %
; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vle32.v v12, (a1) ; CHECK-NEXT: vle32.v v12, (a1)
; CHECK-NEXT: vmflt.vv v16, v8, v12 ; CHECK-NEXT: vmflt.vv v16, v8, v12
; CHECK-NEXT: vmnand.mm v8, v16, v16 ; CHECK-NEXT: vmnot.m v8, v16
; CHECK-NEXT: vsm.v v8, (a2) ; CHECK-NEXT: vsm.v v8, (a2)
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%a = load <16 x float>, <16 x float>* %x %a = load <16 x float>, <16 x float>* %x
@@ -323,7 +323,7 @@ define void @fcmp_ult_vv_v8f64(<8 x double>* %x, <8 x double>* %y, <8 x i1>* %z)
; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vle64.v v12, (a1) ; CHECK-NEXT: vle64.v v12, (a1)
; CHECK-NEXT: vmfle.vv v16, v12, v8 ; CHECK-NEXT: vmfle.vv v16, v12, v8
; CHECK-NEXT: vmnand.mm v8, v16, v16 ; CHECK-NEXT: vmnot.m v8, v16
; CHECK-NEXT: vsm.v v8, (a2) ; CHECK-NEXT: vsm.v v8, (a2)
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%a = load <8 x double>, <8 x double>* %x %a = load <8 x double>, <8 x double>* %x
@@ -357,7 +357,7 @@ define void @fcmp_ugt_vv_v64f16(<64 x half>* %x, <64 x half>* %y, <64 x i1>* %z)
; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vle16.v v16, (a1) ; CHECK-NEXT: vle16.v v16, (a1)
; CHECK-NEXT: vmfle.vv v24, v8, v16 ; CHECK-NEXT: vmfle.vv v24, v8, v16
; CHECK-NEXT: vmnand.mm v8, v24, v24 ; CHECK-NEXT: vmnot.m v8, v24
; CHECK-NEXT: vsm.v v8, (a2) ; CHECK-NEXT: vsm.v v8, (a2)
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%a = load <64 x half>, <64 x half>* %x %a = load <64 x half>, <64 x half>* %x
@@ -761,7 +761,7 @@ define void @fcmp_ule_vf_v32f16(<32 x half>* %x, half %y, <32 x i1>* %z) {
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmfgt.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v8, v12, v12 ; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%a = load <32 x half>, <32 x half>* %x %a = load <32 x half>, <32 x half>* %x
@@ -795,7 +795,7 @@ define void @fcmp_uge_vf_v16f32(<16 x float>* %x, float %y, <16 x i1>* %z) {
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmflt.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v8, v12, v12 ; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%a = load <16 x float>, <16 x float>* %x %a = load <16 x float>, <16 x float>* %x
@@ -828,7 +828,7 @@ define void @fcmp_ult_vf_v8f64(<8 x double>* %x, double %y, <8 x i1>* %z) {
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vmfge.vf v12, v8, fa0 ; CHECK-NEXT: vmfge.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v8, v12, v12 ; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%a = load <8 x double>, <8 x double>* %x %a = load <8 x double>, <8 x double>* %x
@@ -862,7 +862,7 @@ define void @fcmp_ugt_vf_v64f16(<64 x half>* %x, half %y, <64 x i1>* %z) {
; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu
; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmfle.vf v16, v8, fa0 ; CHECK-NEXT: vmfle.vf v16, v8, fa0
; CHECK-NEXT: vmnand.mm v8, v16, v16 ; CHECK-NEXT: vmnot.m v8, v16
; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%a = load <64 x half>, <64 x half>* %x %a = load <64 x half>, <64 x half>* %x
@@ -1269,7 +1269,7 @@ define void @fcmp_ule_fv_v32f16(<32 x half>* %x, half %y, <32 x i1>* %z) {
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmflt.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v8, v12, v12 ; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%a = load <32 x half>, <32 x half>* %x %a = load <32 x half>, <32 x half>* %x
@@ -1303,7 +1303,7 @@ define void @fcmp_uge_fv_v16f32(<16 x float>* %x, float %y, <16 x i1>* %z) {
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmfgt.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v8, v12, v12 ; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%a = load <16 x float>, <16 x float>* %x %a = load <16 x float>, <16 x float>* %x
@@ -1336,7 +1336,7 @@ define void @fcmp_ult_fv_v8f64(<8 x double>* %x, double %y, <8 x i1>* %z) {
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vmfle.vf v12, v8, fa0 ; CHECK-NEXT: vmfle.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v8, v12, v12 ; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%a = load <8 x double>, <8 x double>* %x %a = load <8 x double>, <8 x double>* %x
@@ -1370,7 +1370,7 @@ define void @fcmp_ugt_fv_v64f16(<64 x half>* %x, half %y, <64 x i1>* %z) {
; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu
; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmfge.vf v16, v8, fa0 ; CHECK-NEXT: vmfge.vf v16, v8, fa0
; CHECK-NEXT: vmnand.mm v8, v16, v16 ; CHECK-NEXT: vmnot.m v8, v16
; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%a = load <64 x half>, <64 x half>* %x %a = load <64 x half>, <64 x half>* %x

View File

@@ -59,7 +59,7 @@ define void @not_v64i1(<64 x i1>* %x, <64 x i1>* %y) {
; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vmnand.mm v8, v8, v8 ; CHECK-NEXT: vmnot.m v8, v8
; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%a = load <64 x i1>, <64 x i1>* %x %a = load <64 x i1>, <64 x i1>* %x

View File

@@ -10,7 +10,7 @@ define signext i1 @vpreduce_and_v1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i3
; CHECK-LABEL: vpreduce_and_v1i1: ; CHECK-LABEL: vpreduce_and_v1i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0 ; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: seqz a1, a1
@@ -62,7 +62,7 @@ define signext i1 @vpreduce_and_v2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i3
; CHECK-LABEL: vpreduce_and_v2i1: ; CHECK-LABEL: vpreduce_and_v2i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0 ; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: seqz a1, a1
@@ -114,7 +114,7 @@ define signext i1 @vpreduce_and_v4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i3
; CHECK-LABEL: vpreduce_and_v4i1: ; CHECK-LABEL: vpreduce_and_v4i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0 ; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: seqz a1, a1
@@ -166,7 +166,7 @@ define signext i1 @vpreduce_and_v8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i3
; CHECK-LABEL: vpreduce_and_v8i1: ; CHECK-LABEL: vpreduce_and_v8i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0 ; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: seqz a1, a1
@@ -218,7 +218,7 @@ define signext i1 @vpreduce_and_v10i1(i1 signext %s, <10 x i1> %v, <10 x i1> %m,
; CHECK-LABEL: vpreduce_and_v10i1: ; CHECK-LABEL: vpreduce_and_v10i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0 ; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: seqz a1, a1
@@ -235,7 +235,7 @@ define signext i1 @vpreduce_and_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m,
; CHECK-LABEL: vpreduce_and_v16i1: ; CHECK-LABEL: vpreduce_and_v16i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0 ; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: seqz a1, a1
@@ -259,7 +259,7 @@ define signext i1 @vpreduce_and_v256i1(i1 signext %s, <256 x i1> %v, <256 x i1>
; CHECK-NEXT: mv a3, a2 ; CHECK-NEXT: mv a3, a2
; CHECK-NEXT: .LBB14_2: ; CHECK-NEXT: .LBB14_2:
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu
; CHECK-NEXT: vmnand.mm v8, v8, v8 ; CHECK-NEXT: vmnot.m v8, v8
; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vcpop.m a2, v8, v0.t ; CHECK-NEXT: vcpop.m a2, v8, v0.t
; CHECK-NEXT: li a3, 128 ; CHECK-NEXT: li a3, 128
@@ -269,7 +269,7 @@ define signext i1 @vpreduce_and_v256i1(i1 signext %s, <256 x i1> %v, <256 x i1>
; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB14_4: ; CHECK-NEXT: .LBB14_4:
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vmnand.mm v8, v11, v11 ; CHECK-NEXT: vmnot.m v8, v11
; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vcpop.m a1, v8, v0.t ; CHECK-NEXT: vcpop.m a1, v8, v0.t
; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: seqz a1, a1

View File

@@ -315,7 +315,7 @@ define <8 x i1> @fcmp_ugt_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m,
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmfle.vv v8, v8, v9, v0.t ; CHECK-NEXT: vmfle.vv v8, v8, v9, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl) %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl)
ret <8 x i1> %v ret <8 x i1> %v
@@ -327,7 +327,7 @@ define <8 x i1> @fcmp_ugt_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zer
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0 %elt.head = insertelement <8 x half> poison, half %b, i32 0
%vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
@@ -341,7 +341,7 @@ define <8 x i1> @fcmp_ugt_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i3
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0 %elt.head = insertelement <8 x half> poison, half %b, i32 0
%vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
@@ -355,7 +355,7 @@ define <8 x i1> @fcmp_uge_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m,
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmflt.vv v8, v8, v9, v0.t ; CHECK-NEXT: vmflt.vv v8, v8, v9, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"uge", <8 x i1> %m, i32 %evl) %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"uge", <8 x i1> %m, i32 %evl)
ret <8 x i1> %v ret <8 x i1> %v
@@ -367,7 +367,7 @@ define <8 x i1> @fcmp_uge_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zer
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0 %elt.head = insertelement <8 x half> poison, half %b, i32 0
%vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
@@ -381,7 +381,7 @@ define <8 x i1> @fcmp_uge_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i3
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0 %elt.head = insertelement <8 x half> poison, half %b, i32 0
%vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
@@ -395,7 +395,7 @@ define <8 x i1> @fcmp_ult_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m,
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmfle.vv v8, v9, v8, v0.t ; CHECK-NEXT: vmfle.vv v8, v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ult", <8 x i1> %m, i32 %evl) %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ult", <8 x i1> %m, i32 %evl)
ret <8 x i1> %v ret <8 x i1> %v
@@ -407,7 +407,7 @@ define <8 x i1> @fcmp_ult_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zer
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0 %elt.head = insertelement <8 x half> poison, half %b, i32 0
%vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
@@ -421,7 +421,7 @@ define <8 x i1> @fcmp_ult_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i3
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0 %elt.head = insertelement <8 x half> poison, half %b, i32 0
%vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
@@ -435,7 +435,7 @@ define <8 x i1> @fcmp_ule_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m,
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmflt.vv v8, v9, v8, v0.t ; CHECK-NEXT: vmflt.vv v8, v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ule", <8 x i1> %m, i32 %evl) %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ule", <8 x i1> %m, i32 %evl)
ret <8 x i1> %v ret <8 x i1> %v
@@ -447,7 +447,7 @@ define <8 x i1> @fcmp_ule_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zer
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0 %elt.head = insertelement <8 x half> poison, half %b, i32 0
%vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
@@ -461,7 +461,7 @@ define <8 x i1> @fcmp_ule_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i3
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0 %elt.head = insertelement <8 x half> poison, half %b, i32 0
%vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
@@ -876,7 +876,7 @@ define <8 x i1> @fcmp_ugt_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1>
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfle.vv v16, v8, v12, v0.t ; CHECK-NEXT: vmfle.vv v16, v8, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl) %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl)
ret <8 x i1> %v ret <8 x i1> %v
@@ -888,7 +888,7 @@ define <8 x i1> @fcmp_ugt_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfle.vf v12, v8, fa0, v0.t ; CHECK-NEXT: vmfle.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0 %elt.head = insertelement <8 x double> poison, double %b, i32 0
%vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
@@ -902,7 +902,7 @@ define <8 x i1> @fcmp_ugt_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfge.vf v12, v8, fa0, v0.t ; CHECK-NEXT: vmfge.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0 %elt.head = insertelement <8 x double> poison, double %b, i32 0
%vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
@@ -916,7 +916,7 @@ define <8 x i1> @fcmp_uge_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1>
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vv v16, v8, v12, v0.t ; CHECK-NEXT: vmflt.vv v16, v8, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"uge", <8 x i1> %m, i32 %evl) %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"uge", <8 x i1> %m, i32 %evl)
ret <8 x i1> %v ret <8 x i1> %v
@@ -928,7 +928,7 @@ define <8 x i1> @fcmp_uge_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vf v12, v8, fa0, v0.t ; CHECK-NEXT: vmflt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0 %elt.head = insertelement <8 x double> poison, double %b, i32 0
%vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
@@ -942,7 +942,7 @@ define <8 x i1> @fcmp_uge_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfgt.vf v12, v8, fa0, v0.t ; CHECK-NEXT: vmfgt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0 %elt.head = insertelement <8 x double> poison, double %b, i32 0
%vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
@@ -956,7 +956,7 @@ define <8 x i1> @fcmp_ult_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1>
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfle.vv v16, v12, v8, v0.t ; CHECK-NEXT: vmfle.vv v16, v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ult", <8 x i1> %m, i32 %evl) %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ult", <8 x i1> %m, i32 %evl)
ret <8 x i1> %v ret <8 x i1> %v
@@ -968,7 +968,7 @@ define <8 x i1> @fcmp_ult_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfge.vf v12, v8, fa0, v0.t ; CHECK-NEXT: vmfge.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0 %elt.head = insertelement <8 x double> poison, double %b, i32 0
%vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
@@ -982,7 +982,7 @@ define <8 x i1> @fcmp_ult_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfle.vf v12, v8, fa0, v0.t ; CHECK-NEXT: vmfle.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0 %elt.head = insertelement <8 x double> poison, double %b, i32 0
%vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
@ -996,7 +996,7 @@ define <8 x i1> @fcmp_ule_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1>
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vv v16, v12, v8, v0.t ; CHECK-NEXT: vmflt.vv v16, v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ule", <8 x i1> %m, i32 %evl) %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ule", <8 x i1> %m, i32 %evl)
ret <8 x i1> %v ret <8 x i1> %v
@ -1008,7 +1008,7 @@ define <8 x i1> @fcmp_ule_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfgt.vf v12, v8, fa0, v0.t ; CHECK-NEXT: vmfgt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0 %elt.head = insertelement <8 x double> poison, double %b, i32 0
%vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
@ -1022,7 +1022,7 @@ define <8 x i1> @fcmp_ule_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vf v12, v8, fa0, v0.t ; CHECK-NEXT: vmflt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0 %elt.head = insertelement <8 x double> poison, double %b, i32 0
%vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer

View File

@ -86,7 +86,7 @@ define signext i1 @vreduce_and_v2i1(<2 x i1> %v) {
; CHECK-LABEL: vreduce_and_v2i1: ; CHECK-LABEL: vreduce_and_v2i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0 ; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0 ; CHECK-NEXT: neg a0, a0
@ -129,7 +129,7 @@ define signext i1 @vreduce_and_v4i1(<4 x i1> %v) {
; CHECK-LABEL: vreduce_and_v4i1: ; CHECK-LABEL: vreduce_and_v4i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0 ; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0 ; CHECK-NEXT: neg a0, a0
@ -172,7 +172,7 @@ define signext i1 @vreduce_and_v8i1(<8 x i1> %v) {
; CHECK-LABEL: vreduce_and_v8i1: ; CHECK-LABEL: vreduce_and_v8i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0 ; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0 ; CHECK-NEXT: neg a0, a0
@ -215,7 +215,7 @@ define signext i1 @vreduce_and_v16i1(<16 x i1> %v) {
; CHECK-LABEL: vreduce_and_v16i1: ; CHECK-LABEL: vreduce_and_v16i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0 ; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0 ; CHECK-NEXT: neg a0, a0
@ -288,7 +288,7 @@ define signext i1 @vreduce_and_v32i1(<32 x i1> %v) {
; LMULMAX8: # %bb.0: ; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: li a0, 32 ; LMULMAX8-NEXT: li a0, 32
; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; LMULMAX8-NEXT: vmnand.mm v8, v0, v0 ; LMULMAX8-NEXT: vmnot.m v8, v0
; LMULMAX8-NEXT: vcpop.m a0, v8 ; LMULMAX8-NEXT: vcpop.m a0, v8
; LMULMAX8-NEXT: seqz a0, a0 ; LMULMAX8-NEXT: seqz a0, a0
; LMULMAX8-NEXT: neg a0, a0 ; LMULMAX8-NEXT: neg a0, a0
@ -367,7 +367,7 @@ define signext i1 @vreduce_and_v64i1(<64 x i1> %v) {
; LMULMAX8: # %bb.0: ; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: li a0, 64 ; LMULMAX8-NEXT: li a0, 64
; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; LMULMAX8-NEXT: vmnand.mm v8, v0, v0 ; LMULMAX8-NEXT: vmnot.m v8, v0
; LMULMAX8-NEXT: vcpop.m a0, v8 ; LMULMAX8-NEXT: vcpop.m a0, v8
; LMULMAX8-NEXT: seqz a0, a0 ; LMULMAX8-NEXT: seqz a0, a0
; LMULMAX8-NEXT: neg a0, a0 ; LMULMAX8-NEXT: neg a0, a0

View File

@ -315,7 +315,7 @@ define <vscale x 1 x i1> @fcmp_ugt_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfle.vv v8, v8, v9, v0.t ; CHECK-NEXT: vmfle.vv v8, v8, v9, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl) %v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i1> %v ret <vscale x 1 x i1> %v
@ -327,7 +327,7 @@ define <vscale x 1 x i1> @fcmp_ugt_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@ -341,7 +341,7 @@ define <vscale x 1 x i1> @fcmp_ugt_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@ -355,7 +355,7 @@ define <vscale x 1 x i1> @fcmp_uge_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmflt.vv v8, v8, v9, v0.t ; CHECK-NEXT: vmflt.vv v8, v8, v9, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"uge", <vscale x 1 x i1> %m, i32 %evl) %v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i1> %v ret <vscale x 1 x i1> %v
@ -367,7 +367,7 @@ define <vscale x 1 x i1> @fcmp_uge_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@ -381,7 +381,7 @@ define <vscale x 1 x i1> @fcmp_uge_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@ -395,7 +395,7 @@ define <vscale x 1 x i1> @fcmp_ult_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfle.vv v8, v9, v8, v0.t ; CHECK-NEXT: vmfle.vv v8, v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"ult", <vscale x 1 x i1> %m, i32 %evl) %v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i1> %v ret <vscale x 1 x i1> %v
@ -407,7 +407,7 @@ define <vscale x 1 x i1> @fcmp_ult_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@ -421,7 +421,7 @@ define <vscale x 1 x i1> @fcmp_ult_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@ -435,7 +435,7 @@ define <vscale x 1 x i1> @fcmp_ule_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmflt.vv v8, v9, v8, v0.t ; CHECK-NEXT: vmflt.vv v8, v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"ule", <vscale x 1 x i1> %m, i32 %evl) %v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"ule", <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i1> %v ret <vscale x 1 x i1> %v
@ -447,7 +447,7 @@ define <vscale x 1 x i1> @fcmp_ule_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@ -461,7 +461,7 @@ define <vscale x 1 x i1> @fcmp_ule_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@ -876,7 +876,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfle.vv v12, v8, v10, v0.t ; CHECK-NEXT: vmfle.vv v12, v8, v10, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl) %v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i1> %v ret <vscale x 8 x i1> %v
@ -888,7 +888,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@ -902,7 +902,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@ -916,7 +916,7 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmflt.vv v12, v8, v10, v0.t ; CHECK-NEXT: vmflt.vv v12, v8, v10, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"uge", <vscale x 8 x i1> %m, i32 %evl) %v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i1> %v ret <vscale x 8 x i1> %v
@ -928,7 +928,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@ -942,7 +942,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@ -956,7 +956,7 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfle.vv v12, v10, v8, v0.t ; CHECK-NEXT: vmfle.vv v12, v10, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"ult", <vscale x 8 x i1> %m, i32 %evl) %v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i1> %v ret <vscale x 8 x i1> %v
@ -968,7 +968,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@ -982,7 +982,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@ -996,7 +996,7 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmflt.vv v12, v10, v8, v0.t ; CHECK-NEXT: vmflt.vv v12, v10, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"ule", <vscale x 8 x i1> %m, i32 %evl) %v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"ule", <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i1> %v ret <vscale x 8 x i1> %v
@ -1008,7 +1008,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@ -1022,7 +1022,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t ; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@ -1425,7 +1425,7 @@ define <vscale x 1 x i1> @fcmp_ugt_vv_nxv1f64(<vscale x 1 x double> %va, <vscale
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfle.vv v8, v8, v9, v0.t ; CHECK-NEXT: vmfle.vv v8, v8, v9, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl) %v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i1> %v ret <vscale x 1 x i1> %v
@ -1437,7 +1437,7 @@ define <vscale x 1 x i1> @fcmp_ugt_vf_nxv1f64(<vscale x 1 x double> %va, double
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0 %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
@ -1451,7 +1451,7 @@ define <vscale x 1 x i1> @fcmp_ugt_vf_swap_nxv1f64(<vscale x 1 x double> %va, do
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0 %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
@ -1465,7 +1465,7 @@ define <vscale x 1 x i1> @fcmp_uge_vv_nxv1f64(<vscale x 1 x double> %va, <vscale
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmflt.vv v8, v8, v9, v0.t ; CHECK-NEXT: vmflt.vv v8, v8, v9, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"uge", <vscale x 1 x i1> %m, i32 %evl) %v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i1> %v ret <vscale x 1 x i1> %v
@ -1477,7 +1477,7 @@ define <vscale x 1 x i1> @fcmp_uge_vf_nxv1f64(<vscale x 1 x double> %va, double
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0 %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
@ -1491,7 +1491,7 @@ define <vscale x 1 x i1> @fcmp_uge_vf_swap_nxv1f64(<vscale x 1 x double> %va, do
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0 %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
@ -1505,7 +1505,7 @@ define <vscale x 1 x i1> @fcmp_ult_vv_nxv1f64(<vscale x 1 x double> %va, <vscale
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfle.vv v8, v9, v8, v0.t ; CHECK-NEXT: vmfle.vv v8, v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"ult", <vscale x 1 x i1> %m, i32 %evl) %v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i1> %v ret <vscale x 1 x i1> %v
@ -1517,7 +1517,7 @@ define <vscale x 1 x i1> @fcmp_ult_vf_nxv1f64(<vscale x 1 x double> %va, double
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0 %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
@ -1531,7 +1531,7 @@ define <vscale x 1 x i1> @fcmp_ult_vf_swap_nxv1f64(<vscale x 1 x double> %va, do
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0 %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
@ -1545,7 +1545,7 @@ define <vscale x 1 x i1> @fcmp_ule_vv_nxv1f64(<vscale x 1 x double> %va, <vscale
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmflt.vv v8, v9, v8, v0.t ; CHECK-NEXT: vmflt.vv v8, v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"ule", <vscale x 1 x i1> %m, i32 %evl) %v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"ule", <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i1> %v ret <vscale x 1 x i1> %v
@ -1557,7 +1557,7 @@ define <vscale x 1 x i1> @fcmp_ule_vf_nxv1f64(<vscale x 1 x double> %va, double
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0 %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
@ -1571,7 +1571,7 @@ define <vscale x 1 x i1> @fcmp_ule_vf_swap_nxv1f64(<vscale x 1 x double> %va, do
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t ; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0 %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
@ -1986,7 +1986,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfle.vv v24, v8, v16, v0.t ; CHECK-NEXT: vmfle.vv v24, v8, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v24, v24 ; CHECK-NEXT: vmnot.m v0, v24
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl) %v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i1> %v ret <vscale x 8 x i1> %v
@ -1998,7 +1998,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64(<vscale x 8 x double> %va, double
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfle.vf v16, v8, fa0, v0.t ; CHECK-NEXT: vmfle.vf v16, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0 %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@ -2012,7 +2012,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_swap_nxv8f64(<vscale x 8 x double> %va, do
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfge.vf v16, v8, fa0, v0.t ; CHECK-NEXT: vmfge.vf v16, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0 %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@ -2026,7 +2026,7 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmflt.vv v24, v8, v16, v0.t ; CHECK-NEXT: vmflt.vv v24, v8, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v24, v24 ; CHECK-NEXT: vmnot.m v0, v24
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"uge", <vscale x 8 x i1> %m, i32 %evl) %v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i1> %v ret <vscale x 8 x i1> %v
@ -2038,7 +2038,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64(<vscale x 8 x double> %va, double
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmflt.vf v16, v8, fa0, v0.t ; CHECK-NEXT: vmflt.vf v16, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0 %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@ -2052,7 +2052,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_swap_nxv8f64(<vscale x 8 x double> %va, do
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfgt.vf v16, v8, fa0, v0.t ; CHECK-NEXT: vmfgt.vf v16, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0 %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@ -2066,7 +2066,7 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfle.vv v24, v16, v8, v0.t ; CHECK-NEXT: vmfle.vv v24, v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v24, v24 ; CHECK-NEXT: vmnot.m v0, v24
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"ult", <vscale x 8 x i1> %m, i32 %evl) %v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i1> %v ret <vscale x 8 x i1> %v
@ -2078,7 +2078,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64(<vscale x 8 x double> %va, double
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfge.vf v16, v8, fa0, v0.t ; CHECK-NEXT: vmfge.vf v16, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0 %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@ -2092,7 +2092,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_swap_nxv8f64(<vscale x 8 x double> %va, do
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfle.vf v16, v8, fa0, v0.t ; CHECK-NEXT: vmfle.vf v16, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0 %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@ -2106,7 +2106,7 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmflt.vv v24, v16, v8, v0.t ; CHECK-NEXT: vmflt.vv v24, v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v24, v24 ; CHECK-NEXT: vmnot.m v0, v24
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"ule", <vscale x 8 x i1> %m, i32 %evl) %v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"ule", <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i1> %v ret <vscale x 8 x i1> %v
@ -2118,7 +2118,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64(<vscale x 8 x double> %va, double
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfgt.vf v16, v8, fa0, v0.t ; CHECK-NEXT: vmfgt.vf v16, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0 %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@ -2132,7 +2132,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_swap_nxv8f64(<vscale x 8 x double> %va, do
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmflt.vf v16, v8, fa0, v0.t ; CHECK-NEXT: vmflt.vf v16, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0 %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer

View File

@ -485,7 +485,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfle.vv v12, v8, v10 ; CHECK-NEXT: vmfle.vv v12, v8, v10
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%vc = fcmp ugt <vscale x 8 x half> %va, %vb %vc = fcmp ugt <vscale x 8 x half> %va, %vb
ret <vscale x 8 x i1> %vc ret <vscale x 8 x i1> %vc
@ -496,7 +496,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfle.vf v10, v8, fa0 ; CHECK-NEXT: vmfle.vf v10, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@ -509,7 +509,7 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfge.vf v10, v8, fa0 ; CHECK-NEXT: vmfge.vf v10, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@ -544,7 +544,7 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vv v12, v8, v10 ; CHECK-NEXT: vmflt.vv v12, v8, v10
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%vc = fcmp uge <vscale x 8 x half> %va, %vb %vc = fcmp uge <vscale x 8 x half> %va, %vb
ret <vscale x 8 x i1> %vc ret <vscale x 8 x i1> %vc
@ -555,7 +555,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: vmflt.vf v10, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@ -568,7 +568,7 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vmfgt.vf v10, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@ -603,7 +603,7 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfle.vv v12, v10, v8 ; CHECK-NEXT: vmfle.vv v12, v10, v8
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%vc = fcmp ult <vscale x 8 x half> %va, %vb %vc = fcmp ult <vscale x 8 x half> %va, %vb
ret <vscale x 8 x i1> %vc ret <vscale x 8 x i1> %vc
@ -614,7 +614,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfge.vf v10, v8, fa0 ; CHECK-NEXT: vmfge.vf v10, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@ -627,7 +627,7 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfle.vf v10, v8, fa0 ; CHECK-NEXT: vmfle.vf v10, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@ -662,7 +662,7 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vv v12, v10, v8 ; CHECK-NEXT: vmflt.vv v12, v10, v8
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%vc = fcmp ule <vscale x 8 x half> %va, %vb %vc = fcmp ule <vscale x 8 x half> %va, %vb
ret <vscale x 8 x i1> %vc ret <vscale x 8 x i1> %vc
@ -673,7 +673,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16(<vscale x 8 x half> %va, half %b)
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vmfgt.vf v10, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@ -686,7 +686,7 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f16(<vscale x 8 x half> %va, half %b)
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: vmflt.vf v10, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0 %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@ -1319,7 +1319,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmfle.vv v16, v8, v12 ; CHECK-NEXT: vmfle.vv v16, v8, v12
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%vc = fcmp ugt <vscale x 8 x float> %va, %vb %vc = fcmp ugt <vscale x 8 x float> %va, %vb
ret <vscale x 8 x i1> %vc ret <vscale x 8 x i1> %vc
@ -1330,7 +1330,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f32(<vscale x 8 x float> %va, float %b
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmfle.vf v12, v8, fa0 ; CHECK-NEXT: vmfle.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0 %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@ -1343,7 +1343,7 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f32(<vscale x 8 x float> %va, float %b
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmfge.vf v12, v8, fa0 ; CHECK-NEXT: vmfge.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0 %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@ -1378,7 +1378,7 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vv v16, v8, v12 ; CHECK-NEXT: vmflt.vv v16, v8, v12
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%vc = fcmp uge <vscale x 8 x float> %va, %vb %vc = fcmp uge <vscale x 8 x float> %va, %vb
ret <vscale x 8 x i1> %vc ret <vscale x 8 x i1> %vc
@ -1389,7 +1389,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f32(<vscale x 8 x float> %va, float %b
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmflt.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0 %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@ -1402,7 +1402,7 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f32(<vscale x 8 x float> %va, float %b
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmfgt.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0 %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@ -1437,7 +1437,7 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmfle.vv v16, v12, v8 ; CHECK-NEXT: vmfle.vv v16, v12, v8
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%vc = fcmp ult <vscale x 8 x float> %va, %vb %vc = fcmp ult <vscale x 8 x float> %va, %vb
ret <vscale x 8 x i1> %vc ret <vscale x 8 x i1> %vc
@ -1448,7 +1448,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f32(<vscale x 8 x float> %va, float %b
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmfge.vf v12, v8, fa0 ; CHECK-NEXT: vmfge.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0 %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@ -1461,7 +1461,7 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f32(<vscale x 8 x float> %va, float %b
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmfle.vf v12, v8, fa0 ; CHECK-NEXT: vmfle.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0 %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@ -1496,7 +1496,7 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vv v16, v12, v8 ; CHECK-NEXT: vmflt.vv v16, v12, v8
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%vc = fcmp ule <vscale x 8 x float> %va, %vb %vc = fcmp ule <vscale x 8 x float> %va, %vb
ret <vscale x 8 x i1> %vc ret <vscale x 8 x i1> %vc
@ -1507,7 +1507,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f32(<vscale x 8 x float> %va, float %b
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmfgt.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0 %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@ -1520,7 +1520,7 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f32(<vscale x 8 x float> %va, float %b
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmflt.vf v12, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0 %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@ -2153,7 +2153,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmfle.vv v24, v8, v16 ; CHECK-NEXT: vmfle.vv v24, v8, v16
; CHECK-NEXT: vmnand.mm v0, v24, v24 ; CHECK-NEXT: vmnot.m v0, v24
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%vc = fcmp ugt <vscale x 8 x double> %va, %vb %vc = fcmp ugt <vscale x 8 x double> %va, %vb
ret <vscale x 8 x i1> %vc ret <vscale x 8 x i1> %vc
@ -2164,7 +2164,7 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f64(<vscale x 8 x double> %va, double
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmfle.vf v16, v8, fa0 ; CHECK-NEXT: vmfle.vf v16, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0 %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@ -2177,7 +2177,7 @@ define <vscale x 8 x i1> @fcmp_ugt_fv_nxv8f64(<vscale x 8 x double> %va, double
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmfge.vf v16, v8, fa0 ; CHECK-NEXT: vmfge.vf v16, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0 %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@ -2212,7 +2212,7 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vv v24, v8, v16 ; CHECK-NEXT: vmflt.vv v24, v8, v16
; CHECK-NEXT: vmnand.mm v0, v24, v24 ; CHECK-NEXT: vmnot.m v0, v24
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%vc = fcmp uge <vscale x 8 x double> %va, %vb %vc = fcmp uge <vscale x 8 x double> %va, %vb
ret <vscale x 8 x i1> %vc ret <vscale x 8 x i1> %vc
@ -2223,7 +2223,7 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f64(<vscale x 8 x double> %va, double
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vmflt.vf v16, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0 %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@ -2236,7 +2236,7 @@ define <vscale x 8 x i1> @fcmp_uge_fv_nxv8f64(<vscale x 8 x double> %va, double
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vmfgt.vf v16, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0 %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@ -2271,7 +2271,7 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmfle.vv v24, v16, v8 ; CHECK-NEXT: vmfle.vv v24, v16, v8
; CHECK-NEXT: vmnand.mm v0, v24, v24 ; CHECK-NEXT: vmnot.m v0, v24
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%vc = fcmp ult <vscale x 8 x double> %va, %vb %vc = fcmp ult <vscale x 8 x double> %va, %vb
ret <vscale x 8 x i1> %vc ret <vscale x 8 x i1> %vc
@ -2282,7 +2282,7 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f64(<vscale x 8 x double> %va, double
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmfge.vf v16, v8, fa0 ; CHECK-NEXT: vmfge.vf v16, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0 %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@ -2295,7 +2295,7 @@ define <vscale x 8 x i1> @fcmp_ult_fv_nxv8f64(<vscale x 8 x double> %va, double
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmfle.vf v16, v8, fa0 ; CHECK-NEXT: vmfle.vf v16, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0 %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@ -2330,7 +2330,7 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f64(<vscale x 8 x double> %va, <vscale
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vv v24, v16, v8 ; CHECK-NEXT: vmflt.vv v24, v16, v8
; CHECK-NEXT: vmnand.mm v0, v24, v24 ; CHECK-NEXT: vmnot.m v0, v24
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%vc = fcmp ule <vscale x 8 x double> %va, %vb %vc = fcmp ule <vscale x 8 x double> %va, %vb
ret <vscale x 8 x i1> %vc ret <vscale x 8 x i1> %vc
@ -2341,7 +2341,7 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f64(<vscale x 8 x double> %va, double
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vmfgt.vf v16, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0 %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@ -2354,7 +2354,7 @@ define <vscale x 8 x i1> @fcmp_ule_fv_nxv8f64(<vscale x 8 x double> %va, double
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vmflt.vf v16, v8, fa0
; CHECK-NEXT: vmnand.mm v0, v16, v16 ; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret ; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0 %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer

View File

@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8( %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
@ -995,7 +995,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8( %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
@ -1043,7 +1043,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8( %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
@ -1091,7 +1091,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8( %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
@ -1139,7 +1139,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmslt.vx v10, v8, a0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8( %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
@ -1187,7 +1187,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmslt.vx v12, v8, a0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8( %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
@ -1235,7 +1235,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16( %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
@ -1283,7 +1283,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16( %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
@ -1331,7 +1331,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16( %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
@ -1379,7 +1379,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmslt.vx v10, v8, a0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16( %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
@ -1427,7 +1427,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i16_i16(<vscale x 16 x i16> %
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmslt.vx v12, v8, a0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16( %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
@ -1475,7 +1475,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32( %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
@ -1523,7 +1523,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32( %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
@ -1571,7 +1571,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmslt.vx v10, v8, a0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32( %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
@ -1619,7 +1619,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmslt.vx v12, v8, a0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32( %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(

View File

@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8( %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
@ -995,7 +995,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8( %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
@ -1043,7 +1043,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8( %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
@ -1091,7 +1091,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8( %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
@ -1139,7 +1139,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmslt.vx v10, v8, a0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8( %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
@ -1187,7 +1187,7 @@ define <vscale x 32 x i1> @intrinsic_vmsge_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmslt.vx v12, v8, a0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8( %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
@ -1235,7 +1235,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16( %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
@ -1283,7 +1283,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16( %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
@ -1331,7 +1331,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16( %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
@ -1379,7 +1379,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmslt.vx v10, v8, a0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16( %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
@ -1427,7 +1427,7 @@ define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i16_i16(<vscale x 16 x i16> %
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmslt.vx v12, v8, a0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16( %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
@ -1475,7 +1475,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32( %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
@ -1523,7 +1523,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32( %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
@ -1571,7 +1571,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmslt.vx v10, v8, a0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32( %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
@ -1619,7 +1619,7 @@ define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmslt.vx v12, v8, a0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32( %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
@ -1667,7 +1667,7 @@ define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmslt.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64( %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
@ -1715,7 +1715,7 @@ define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmslt.vx v10, v8, a0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64( %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
@ -1763,7 +1763,7 @@ define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmslt.vx v12, v8, a0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64( %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(

View File

@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8( %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
@ -995,7 +995,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8( %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
@ -1043,7 +1043,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8( %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
@ -1091,7 +1091,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8( %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
@ -1139,7 +1139,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmsltu.vx v10, v8, a0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8( %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
@ -1187,7 +1187,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmsltu.vx v12, v8, a0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8( %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
@ -1235,7 +1235,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16( %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
@ -1283,7 +1283,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16( %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
@ -1331,7 +1331,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16( %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
@ -1379,7 +1379,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmsltu.vx v10, v8, a0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16( %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
@ -1427,7 +1427,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i16_i16(<vscale x 16 x i16>
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmsltu.vx v12, v8, a0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16( %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
@ -1475,7 +1475,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32( %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
@ -1523,7 +1523,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32( %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
@ -1571,7 +1571,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmsltu.vx v10, v8, a0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32( %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
@ -1619,7 +1619,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmsltu.vx v12, v8, a0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32( %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(

View File

@ -947,7 +947,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8( %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
@ -995,7 +995,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8( %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
@ -1043,7 +1043,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8( %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
@ -1091,7 +1091,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8( %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
@ -1139,7 +1139,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i8_i8(<vscale x 16 x i8> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmsltu.vx v10, v8, a0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8( %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
@ -1187,7 +1187,7 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_vx_nxv32i8_i8(<vscale x 32 x i8> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmsltu.vx v12, v8, a0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8( %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
@ -1235,7 +1235,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i16_i16(<vscale x 1 x i16> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16( %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
@ -1283,7 +1283,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i16_i16(<vscale x 2 x i16> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16( %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
@ -1331,7 +1331,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i16_i16(<vscale x 4 x i16> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16( %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
@ -1379,7 +1379,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i16_i16(<vscale x 8 x i16> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmsltu.vx v10, v8, a0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16( %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
@ -1427,7 +1427,7 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i16_i16(<vscale x 16 x i16>
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmsltu.vx v12, v8, a0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16( %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
@ -1475,7 +1475,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i32_i32(<vscale x 1 x i32> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32( %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
@ -1523,7 +1523,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i32_i32(<vscale x 2 x i32> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32( %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
@ -1571,7 +1571,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i32_i32(<vscale x 4 x i32> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmsltu.vx v10, v8, a0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32( %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
@ -1619,7 +1619,7 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i32_i32(<vscale x 8 x i32> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmsltu.vx v12, v8, a0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32( %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
@ -1667,7 +1667,7 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i64_i64(<vscale x 1 x i64> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
; CHECK-NEXT: vmnand.mm v0, v8, v8 ; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64( %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
@ -1715,7 +1715,7 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i64_i64(<vscale x 2 x i64> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmsltu.vx v10, v8, a0
; CHECK-NEXT: vmnand.mm v0, v10, v10 ; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64( %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
@ -1763,7 +1763,7 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i64_i64(<vscale x 4 x i64> %0,
; CHECK: # %bb.0: # %entry ; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmsltu.vx v12, v8, a0
; CHECK-NEXT: vmnand.mm v0, v12, v12 ; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret ; CHECK-NEXT: ret
entry: entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64( %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(

View File

@ -8,7 +8,7 @@ define signext i1 @vpreduce_and_nxv1i1(i1 signext %s, <vscale x 1 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_and_nxv1i1: ; CHECK-LABEL: vpreduce_and_nxv1i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0 ; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: seqz a1, a1
@ -60,7 +60,7 @@ define signext i1 @vpreduce_and_nxv2i1(i1 signext %s, <vscale x 2 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_and_nxv2i1: ; CHECK-LABEL: vpreduce_and_nxv2i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0 ; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: seqz a1, a1
@ -112,7 +112,7 @@ define signext i1 @vpreduce_and_nxv4i1(i1 signext %s, <vscale x 4 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_and_nxv4i1: ; CHECK-LABEL: vpreduce_and_nxv4i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0 ; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: seqz a1, a1
@ -164,7 +164,7 @@ define signext i1 @vpreduce_and_nxv8i1(i1 signext %s, <vscale x 8 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_and_nxv8i1: ; CHECK-LABEL: vpreduce_and_nxv8i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0 ; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: seqz a1, a1
@ -216,7 +216,7 @@ define signext i1 @vpreduce_and_nxv16i1(i1 signext %s, <vscale x 16 x i1> %v, <v
; CHECK-LABEL: vpreduce_and_nxv16i1: ; CHECK-LABEL: vpreduce_and_nxv16i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0 ; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: seqz a1, a1
@ -268,7 +268,7 @@ define signext i1 @vpreduce_and_nxv32i1(i1 signext %s, <vscale x 32 x i1> %v, <v
; CHECK-LABEL: vpreduce_and_nxv32i1: ; CHECK-LABEL: vpreduce_and_nxv32i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0 ; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: seqz a1, a1
@ -338,7 +338,7 @@ define signext i1 @vpreduce_and_nxv64i1(i1 signext %s, <vscale x 64 x i1> %v, <v
; CHECK-LABEL: vpreduce_and_nxv64i1: ; CHECK-LABEL: vpreduce_and_nxv64i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0 ; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: seqz a1, a1

View File

@ -36,7 +36,7 @@ define signext i1 @vreduce_and_nxv1i1(<vscale x 1 x i1> %v) {
; CHECK-LABEL: vreduce_and_nxv1i1: ; CHECK-LABEL: vreduce_and_nxv1i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0 ; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0 ; CHECK-NEXT: neg a0, a0
@ -79,7 +79,7 @@ define signext i1 @vreduce_and_nxv2i1(<vscale x 2 x i1> %v) {
; CHECK-LABEL: vreduce_and_nxv2i1: ; CHECK-LABEL: vreduce_and_nxv2i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0 ; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0 ; CHECK-NEXT: neg a0, a0
@ -122,7 +122,7 @@ define signext i1 @vreduce_and_nxv4i1(<vscale x 4 x i1> %v) {
; CHECK-LABEL: vreduce_and_nxv4i1: ; CHECK-LABEL: vreduce_and_nxv4i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0 ; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0 ; CHECK-NEXT: neg a0, a0
@ -165,7 +165,7 @@ define signext i1 @vreduce_and_nxv8i1(<vscale x 8 x i1> %v) {
; CHECK-LABEL: vreduce_and_nxv8i1: ; CHECK-LABEL: vreduce_and_nxv8i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0 ; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0 ; CHECK-NEXT: neg a0, a0
@ -208,7 +208,7 @@ define signext i1 @vreduce_and_nxv16i1(<vscale x 16 x i1> %v) {
; CHECK-LABEL: vreduce_and_nxv16i1: ; CHECK-LABEL: vreduce_and_nxv16i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0 ; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0 ; CHECK-NEXT: neg a0, a0
@ -251,7 +251,7 @@ define signext i1 @vreduce_and_nxv32i1(<vscale x 32 x i1> %v) {
; CHECK-LABEL: vreduce_and_nxv32i1: ; CHECK-LABEL: vreduce_and_nxv32i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0 ; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0 ; CHECK-NEXT: neg a0, a0
@ -294,7 +294,7 @@ define signext i1 @vreduce_and_nxv64i1(<vscale x 64 x i1> %v) {
; CHECK-LABEL: vreduce_and_nxv64i1: ; CHECK-LABEL: vreduce_and_nxv64i1:
; CHECK: # %bb.0: ; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0 ; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0 ; CHECK-NEXT: neg a0, a0