[RISCV] Mark vsadd(u)_vl as commutable
This allows fixed-length vectors involving splats on the LHS to commute into the _vx form of the instruction. Oddly, the generic canonicalization rules appear to catch the scalable vector cases. I haven't fully dug in to understand why, but I suspect it's because of a difference in how we represent splats (splat_vector vs build_vector).

Differential Revision: https://reviews.llvm.org/D129302
commit 264018d764
parent a84e1e6c0d
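The effect is easiest to see in IR. Here is a minimal sketch of the shape this change targets, modeled on the sadd_v2i32_vx_commute test further down; the saturating-add call itself is elided in the excerpt below, so its exact spelling here is an assumption:

; With RISCVISD::SADDSAT_VL marked commutative, ISel can swap the
; operands of this saturating add and select vsadd.vx directly,
; instead of materializing the splat with vmv.v.x and using vsadd.vv.
define <2 x i32> @sadd_splat_lhs(<2 x i32> %va, i32 %b) {
  %head = insertelement <2 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <2 x i32> %head, <2 x i32> poison, <2 x i32> zeroinitializer
  %res = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %splat, <2 x i32> %va)
  ret <2 x i32> %res
}
declare <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32>, <2 x i32>)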
@@ -81,8 +81,8 @@ def riscv_smax_vl : SDNode<"RISCVISD::SMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCom
 def riscv_umin_vl : SDNode<"RISCVISD::UMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
 def riscv_umax_vl : SDNode<"RISCVISD::UMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
 
-def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL>;
-def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL>;
+def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
+def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
 def riscv_ssubsat_vl : SDNode<"RISCVISD::SSUBSAT_VL", SDT_RISCVIntBinOp_VL>;
 def riscv_usubsat_vl : SDNode<"RISCVISD::USUBSAT_VL", SDT_RISCVIntBinOp_VL>;
 
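SDNPCommutative is the same property the generic binary nodes carry: it tells TableGen's generated matcher that it may also try each selection pattern with the two operands swapped, which is what lets a splat on the LHS reach the existing .vx patterns, whose splat operand sits on the RHS. For comparison, the target-independent add node is declared like this (paraphrased from llvm/include/llvm/Target/TargetSelectionDAG.td):

// ISD::ADD is commutative (and associative), so the pattern matcher
// is free to reorder its operands during instruction selection.
def add : SDNode<"ISD::ADD", SDTIntBinOp,
                 [SDNPCommutative, SDNPAssociative]>;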
@@ -80,9 +80,8 @@ define <2 x i1> @fv2(ptr %p, i64 %index, i64 %tc) {
 ; CHECK-LABEL: fv2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.v.x v8, a1
-; CHECK-NEXT:    vid.v v9
-; CHECK-NEXT:    vsaddu.vv v8, v8, v9
+; CHECK-NEXT:    vid.v v8
+; CHECK-NEXT:    vsaddu.vx v8, v8, a1
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a2
 ; CHECK-NEXT:    ret
   %mask = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 %index, i64 %tc)
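The CHECK lines above and below come from the active-lane-mask lowering tests (the file name is not visible in this excerpt). Lane i of the intrinsic's result is true when %index + i is below %tc, and the lowering computes the per-lane indices with an unsigned saturating add of a step vector, which is why vsaddu appears. A minimal standalone use of the intrinsic:

; Lane i of %m is true iff (%index + i) < %tc (unsigned compare),
; letting vectorized loops mask off lanes past the trip count.
define <8 x i1> @active_lanes(i64 %index, i64 %tc) {
  %m = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64 %index, i64 %tc)
  ret <8 x i1> %m
}
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64, i64)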
@@ -93,9 +92,8 @@ define <8 x i1> @fv8(ptr %p, i64 %index, i64 %tc) {
 ; CHECK-LABEL: fv8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
-; CHECK-NEXT:    vmv.v.x v8, a1
-; CHECK-NEXT:    vid.v v12
-; CHECK-NEXT:    vsaddu.vv v8, v8, v12
+; CHECK-NEXT:    vid.v v8
+; CHECK-NEXT:    vsaddu.vx v8, v8, a1
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a2
 ; CHECK-NEXT:    ret
   %mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64 %index, i64 %tc)
@@ -105,18 +103,17 @@ define <8 x i1> @fv8(ptr %p, i64 %index, i64 %tc) {
 define <32 x i1> @fv32(ptr %p, i64 %index, i64 %tc) {
 ; CHECK-LABEL: fv32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI8_0)
 ; CHECK-NEXT:    addi a0, a0, %lo(.LCPI8_0)
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vmv.v.x v16, a1
-; CHECK-NEXT:    vsaddu.vv v8, v16, v8
-; CHECK-NEXT:    vmsltu.vx v24, v8, a2
+; CHECK-NEXT:    vsaddu.vx v8, v8, a1
+; CHECK-NEXT:    vmsltu.vx v16, v8, a2
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    vsaddu.vv v8, v16, v8
+; CHECK-NEXT:    vsaddu.vx v8, v8, a1
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a2
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, tu, mu
-; CHECK-NEXT:    vslideup.vi v0, v24, 2
+; CHECK-NEXT:    vslideup.vi v0, v16, 2
 ; CHECK-NEXT:    ret
   %mask = call <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64 %index, i64 %tc)
   ret <32 x i1> %mask
@@ -125,31 +122,30 @@ define <32 x i1> @fv32(ptr %p, i64 %index, i64 %tc) {
 define <64 x i1> @fv64(ptr %p, i64 %index, i64 %tc) {
 ; CHECK-LABEL: fv64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI9_0)
 ; CHECK-NEXT:    addi a0, a0, %lo(.LCPI9_0)
-; CHECK-NEXT:    vle64.v v16, (a0)
-; CHECK-NEXT:    vmv.v.x v8, a1
-; CHECK-NEXT:    vsaddu.vv v16, v8, v16
-; CHECK-NEXT:    vmsltu.vx v24, v16, a2
-; CHECK-NEXT:    vid.v v16
-; CHECK-NEXT:    vsaddu.vv v16, v8, v16
-; CHECK-NEXT:    vmsltu.vx v0, v16, a2
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsaddu.vx v8, v8, a1
+; CHECK-NEXT:    vmsltu.vx v16, v8, a2
+; CHECK-NEXT:    vid.v v8
+; CHECK-NEXT:    vsaddu.vx v8, v8, a1
+; CHECK-NEXT:    vmsltu.vx v0, v8, a2
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vi v0, v24, 2
+; CHECK-NEXT:    vslideup.vi v0, v16, 2
 ; CHECK-NEXT:    lui a0, %hi(.LCPI9_1)
 ; CHECK-NEXT:    addi a0, a0, %lo(.LCPI9_1)
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vle64.v v16, (a0)
-; CHECK-NEXT:    vsaddu.vv v16, v8, v16
-; CHECK-NEXT:    vmsltu.vx v24, v16, a2
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsaddu.vx v8, v8, a1
+; CHECK-NEXT:    vmsltu.vx v16, v8, a2
 ; CHECK-NEXT:    vsetivli zero, 6, e8, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vi v0, v24, 4
+; CHECK-NEXT:    vslideup.vi v0, v16, 4
 ; CHECK-NEXT:    lui a0, %hi(.LCPI9_2)
 ; CHECK-NEXT:    addi a0, a0, %lo(.LCPI9_2)
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vle64.v v16, (a0)
-; CHECK-NEXT:    vsaddu.vv v8, v8, v16
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsaddu.vx v8, v8, a1
 ; CHECK-NEXT:    vmsltu.vx v16, v8, a2
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
 ; CHECK-NEXT:    vslideup.vi v0, v16, 6
@@ -161,63 +157,62 @@ define <64 x i1> @fv64(ptr %p, i64 %index, i64 %tc) {
 define <128 x i1> @fv128(ptr %p, i64 %index, i64 %tc) {
 ; CHECK-LABEL: fv128:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.LCPI10_0)
 ; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_0)
-; CHECK-NEXT:    vle64.v v16, (a0)
-; CHECK-NEXT:    vmv.v.x v8, a1
-; CHECK-NEXT:    vsaddu.vv v16, v8, v16
-; CHECK-NEXT:    vmsltu.vx v24, v16, a2
-; CHECK-NEXT:    vid.v v16
-; CHECK-NEXT:    vsaddu.vv v16, v8, v16
-; CHECK-NEXT:    vmsltu.vx v0, v16, a2
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsaddu.vx v8, v8, a1
+; CHECK-NEXT:    vmsltu.vx v16, v8, a2
+; CHECK-NEXT:    vid.v v8
+; CHECK-NEXT:    vsaddu.vx v8, v8, a1
+; CHECK-NEXT:    vmsltu.vx v0, v8, a2
 ; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, mu
-; CHECK-NEXT:    vslideup.vi v0, v24, 2
+; CHECK-NEXT:    vslideup.vi v0, v16, 2
 ; CHECK-NEXT:    lui a0, %hi(.LCPI10_1)
 ; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_1)
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vle64.v v16, (a0)
-; CHECK-NEXT:    vsaddu.vv v16, v8, v16
-; CHECK-NEXT:    vmsltu.vx v24, v16, a2
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsaddu.vx v8, v8, a1
+; CHECK-NEXT:    vmsltu.vx v16, v8, a2
 ; CHECK-NEXT:    vsetivli zero, 6, e8, m1, tu, mu
-; CHECK-NEXT:    vslideup.vi v0, v24, 4
+; CHECK-NEXT:    vslideup.vi v0, v16, 4
 ; CHECK-NEXT:    lui a0, %hi(.LCPI10_2)
 ; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_2)
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vle64.v v16, (a0)
-; CHECK-NEXT:    vsaddu.vv v16, v8, v16
-; CHECK-NEXT:    vmsltu.vx v24, v16, a2
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsaddu.vx v8, v8, a1
+; CHECK-NEXT:    vmsltu.vx v16, v8, a2
 ; CHECK-NEXT:    vsetivli zero, 8, e8, m1, tu, mu
-; CHECK-NEXT:    vslideup.vi v0, v24, 6
+; CHECK-NEXT:    vslideup.vi v0, v16, 6
 ; CHECK-NEXT:    lui a0, %hi(.LCPI10_3)
 ; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_3)
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vle64.v v16, (a0)
-; CHECK-NEXT:    vsaddu.vv v16, v8, v16
-; CHECK-NEXT:    vmsltu.vx v24, v16, a2
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsaddu.vx v8, v8, a1
+; CHECK-NEXT:    vmsltu.vx v16, v8, a2
 ; CHECK-NEXT:    vsetivli zero, 10, e8, m1, tu, mu
-; CHECK-NEXT:    vslideup.vi v0, v24, 8
+; CHECK-NEXT:    vslideup.vi v0, v16, 8
 ; CHECK-NEXT:    lui a0, %hi(.LCPI10_4)
 ; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_4)
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vle64.v v16, (a0)
-; CHECK-NEXT:    vsaddu.vv v16, v8, v16
-; CHECK-NEXT:    vmsltu.vx v24, v16, a2
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsaddu.vx v8, v8, a1
+; CHECK-NEXT:    vmsltu.vx v16, v8, a2
 ; CHECK-NEXT:    vsetivli zero, 12, e8, m1, tu, mu
-; CHECK-NEXT:    vslideup.vi v0, v24, 10
+; CHECK-NEXT:    vslideup.vi v0, v16, 10
 ; CHECK-NEXT:    lui a0, %hi(.LCPI10_5)
 ; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_5)
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vle64.v v16, (a0)
-; CHECK-NEXT:    vsaddu.vv v16, v8, v16
-; CHECK-NEXT:    vmsltu.vx v24, v16, a2
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsaddu.vx v8, v8, a1
+; CHECK-NEXT:    vmsltu.vx v16, v8, a2
 ; CHECK-NEXT:    vsetivli zero, 14, e8, m1, tu, mu
-; CHECK-NEXT:    vslideup.vi v0, v24, 12
+; CHECK-NEXT:    vslideup.vi v0, v16, 12
 ; CHECK-NEXT:    lui a0, %hi(.LCPI10_6)
 ; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_6)
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vle64.v v16, (a0)
-; CHECK-NEXT:    vsaddu.vv v8, v8, v16
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsaddu.vx v8, v8, a1
 ; CHECK-NEXT:    vmsltu.vx v16, v8, a2
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
 ; CHECK-NEXT:    vslideup.vi v0, v16, 14
@@ -320,8 +320,7 @@ define <2 x i32> @sadd_v2i32_vx_commute(<2 x i32> %va, i32 %b) {
 ; CHECK-LABEL: sadd_v2i32_vx_commute:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv.v.x v9, a0
-; CHECK-NEXT:    vsadd.vv v8, v9, v8
+; CHECK-NEXT:    vsadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
@@ -320,8 +320,7 @@ define <2 x i32> @uadd_v2i32_vx_commute(<2 x i32> %va, i32 %b) {
 ; CHECK-LABEL: uadd_v2i32_vx_commute:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv.v.x v9, a0
-; CHECK-NEXT:    vsaddu.vv v8, v9, v8
+; CHECK-NEXT:    vsaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
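To reproduce codegen like the checks above, an llc invocation along these lines should work; the tests' actual RUN lines are not shown in this excerpt, so the exact flags here are assumptions:

# RV64 with the V extension; fixing a minimum VLEN lets fixed-length
# vectors be lowered to VL-based RVV code.
llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 < test.ll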