[RISCV] Fix operand order in fixed-length VM(OR|AND)NOT patterns

Where the RVV specification writes `vs2, vs1`, our TableGen patterns use
`rs1, rs2`. This discrepancy can easily cause confusion. The VMANDNOT
instruction computes `LHS && !RHS` — the negation applies to its second
operand — and VMORNOT similarly computes `LHS || !RHS`. The patterns are
updated so the `vmnot` wraps the second operand (`rs2`) rather than the
first, matching the instruction semantics.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D102606
This commit is contained in:
Fraser Cormack 2021-05-17 11:13:19 +01:00
parent cc1a6361d3
commit 175bdf127d
3 changed files with 18 additions and 18 deletions

View File

@ -1073,14 +1073,14 @@ foreach mti = AllMasks in {
(!cast<Instruction>("PseudoVMXOR_MM_" # mti.LMul.MX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmand_vl (riscv_vmnot_vl VR:$rs1,
VLOpFrag),
VR:$rs2, VLOpFrag)),
def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1,
(riscv_vmnot_vl VR:$rs2, VLOpFrag),
VLOpFrag)),
(!cast<Instruction>("PseudoVMANDNOT_MM_" # mti.LMul.MX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmor_vl (riscv_vmnot_vl VR:$rs1,
VLOpFrag),
VR:$rs2, VLOpFrag)),
def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1,
(riscv_vmnot_vl VR:$rs2, VLOpFrag),
VLOpFrag)),
(!cast<Instruction>("PseudoVMORNOT_MM_" # mti.LMul.MX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
// XOR is associative so we need 2 patterns for VMXNOR.

View File

@ -75,7 +75,7 @@ define void @andnot_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
; CHECK-NEXT: vsetivli a2, 8, e8,mf2,ta,mu
; CHECK-NEXT: vle1.v v25, (a0)
; CHECK-NEXT: vle1.v v26, (a1)
; CHECK-NEXT: vmandnot.mm v25, v25, v26
; CHECK-NEXT: vmandnot.mm v25, v26, v25
; CHECK-NEXT: vse1.v v25, (a0)
; CHECK-NEXT: ret
%a = load <8 x i1>, <8 x i1>* %x
@ -92,7 +92,7 @@ define void @ornot_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v25, (a0)
; CHECK-NEXT: vle1.v v26, (a1)
; CHECK-NEXT: vmornot.mm v25, v25, v26
; CHECK-NEXT: vmornot.mm v25, v26, v25
; CHECK-NEXT: vse1.v v25, (a0)
; CHECK-NEXT: ret
%a = load <16 x i1>, <16 x i1>* %x

View File

@ -15,7 +15,7 @@ define <1 x i1> @select_v1i1(i1 zeroext %c, <1 x i1> %a, <1 x i1> %b) {
; CHECK-NEXT: vsetivli a0, 1, e8,mf8,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vmsne.vi v26, v25, 0
; CHECK-NEXT: vmandnot.mm v25, v26, v8
; CHECK-NEXT: vmandnot.mm v25, v8, v26
; CHECK-NEXT: vmand.mm v26, v0, v26
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
@ -36,7 +36,7 @@ define <1 x i1> @selectcc_v1i1(i1 signext %a, i1 signext %b, <1 x i1> %c, <1 x i
; CHECK-NEXT: vsetivli a1, 1, e8,mf8,ta,mu
; CHECK-NEXT: vmv.v.x v25, a0
; CHECK-NEXT: vmsne.vi v26, v25, 0
; CHECK-NEXT: vmandnot.mm v25, v26, v8
; CHECK-NEXT: vmandnot.mm v25, v8, v26
; CHECK-NEXT: vmand.mm v26, v0, v26
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
@ -56,7 +56,7 @@ define <2 x i1> @select_v2i1(i1 zeroext %c, <2 x i1> %a, <2 x i1> %b) {
; CHECK-NEXT: vsetivli a0, 2, e8,mf8,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vmsne.vi v26, v25, 0
; CHECK-NEXT: vmandnot.mm v25, v26, v8
; CHECK-NEXT: vmandnot.mm v25, v8, v26
; CHECK-NEXT: vmand.mm v26, v0, v26
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
@ -77,7 +77,7 @@ define <2 x i1> @selectcc_v2i1(i1 signext %a, i1 signext %b, <2 x i1> %c, <2 x i
; CHECK-NEXT: vsetivli a1, 2, e8,mf8,ta,mu
; CHECK-NEXT: vmv.v.x v25, a0
; CHECK-NEXT: vmsne.vi v26, v25, 0
; CHECK-NEXT: vmandnot.mm v25, v26, v8
; CHECK-NEXT: vmandnot.mm v25, v8, v26
; CHECK-NEXT: vmand.mm v26, v0, v26
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
@ -97,7 +97,7 @@ define <4 x i1> @select_v4i1(i1 zeroext %c, <4 x i1> %a, <4 x i1> %b) {
; CHECK-NEXT: vsetivli a0, 4, e8,mf4,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vmsne.vi v26, v25, 0
; CHECK-NEXT: vmandnot.mm v25, v26, v8
; CHECK-NEXT: vmandnot.mm v25, v8, v26
; CHECK-NEXT: vmand.mm v26, v0, v26
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
@ -118,7 +118,7 @@ define <4 x i1> @selectcc_v4i1(i1 signext %a, i1 signext %b, <4 x i1> %c, <4 x i
; CHECK-NEXT: vsetivli a1, 4, e8,mf4,ta,mu
; CHECK-NEXT: vmv.v.x v25, a0
; CHECK-NEXT: vmsne.vi v26, v25, 0
; CHECK-NEXT: vmandnot.mm v25, v26, v8
; CHECK-NEXT: vmandnot.mm v25, v8, v26
; CHECK-NEXT: vmand.mm v26, v0, v26
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
@ -138,7 +138,7 @@ define <8 x i1> @select_v8i1(i1 zeroext %c, <8 x i1> %a, <8 x i1> %b) {
; CHECK-NEXT: vsetivli a0, 8, e8,mf2,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vmsne.vi v26, v25, 0
; CHECK-NEXT: vmandnot.mm v25, v26, v8
; CHECK-NEXT: vmandnot.mm v25, v8, v26
; CHECK-NEXT: vmand.mm v26, v0, v26
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
@ -159,7 +159,7 @@ define <8 x i1> @selectcc_v8i1(i1 signext %a, i1 signext %b, <8 x i1> %c, <8 x i
; CHECK-NEXT: vsetivli a1, 8, e8,mf2,ta,mu
; CHECK-NEXT: vmv.v.x v25, a0
; CHECK-NEXT: vmsne.vi v26, v25, 0
; CHECK-NEXT: vmandnot.mm v25, v26, v8
; CHECK-NEXT: vmandnot.mm v25, v8, v26
; CHECK-NEXT: vmand.mm v26, v0, v26
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
@ -179,7 +179,7 @@ define <16 x i1> @select_v16i1(i1 zeroext %c, <16 x i1> %a, <16 x i1> %b) {
; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vmsne.vi v26, v25, 0
; CHECK-NEXT: vmandnot.mm v25, v26, v8
; CHECK-NEXT: vmandnot.mm v25, v8, v26
; CHECK-NEXT: vmand.mm v26, v0, v26
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
@ -200,7 +200,7 @@ define <16 x i1> @selectcc_v16i1(i1 signext %a, i1 signext %b, <16 x i1> %c, <16
; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a0
; CHECK-NEXT: vmsne.vi v26, v25, 0
; CHECK-NEXT: vmandnot.mm v25, v26, v8
; CHECK-NEXT: vmandnot.mm v25, v8, v26
; CHECK-NEXT: vmand.mm v26, v0, v26
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret