[AMDGPU] Divergence driven selection for fused bitlogic

This change adds divergence predicates for the fused logical operations.
The problem with selecting a scalar fused op such as S_NOR_B32 is that it
has no VALU counterpart and will be split in moveToVALU. At the same time
it prevents selection of a better opcode on the VALU side (such as
V_OR3_B32) which has no counterpart on the SALU side.
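
For illustration, the divergent case this targets looks roughly like the
IR sketch below (reconstructed from the divergent_or3_b32 checks in the
test diff, not copied from the test file): a three-way OR whose result is
then inverted. Before this change the (not (or ...)) part was selected as
S_NOR_B32 and later split by moveToVALU; with the divergence predicate the
two ORs can instead fuse into v_or3_b32 followed by v_not_b32.

  declare i32 @llvm.amdgcn.workitem.id.x()

  define amdgpu_kernel void @divergent_or3_b32(<3 x i32> addrspace(1)* %arg) {
    ; the workitem id makes the loaded values divergent
    %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
    %gep = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %arg, i32 %tid
    %val = load <3 x i32>, <3 x i32> addrspace(1)* %gep, align 16
    %a = extractelement <3 x i32> %val, i64 0
    %b = extractelement <3 x i32> %val, i64 1
    %c = extractelement <3 x i32> %val, i64 2
    %or0 = or i32 %b, %a
    %or1 = or i32 %or0, %c
    %not = xor i32 %or1, -1                    ; not(or(or(b, a), c))
    %out = bitcast <3 x i32> addrspace(1)* %gep to i32 addrspace(1)*
    store i32 %not, i32 addrspace(1)* %out, align 16
    ret void
  }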

XNOR opcodes are left as is and are still selected as scalar, to take
advantage of the SIInstrInfo::lowerScalarXnor() code, which can commute
the operations to keep one of the two instructions on the SALU where
possible. See the xnor.ll test for this.
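
As a hedged sketch of that XNOR path: for a divergent xnor where one
operand happens to be uniform, lowerScalarXnor() can rewrite xnor(a, b)
as xor(not(a), b) and keep the s_not_b32 on the uniform operand, so only
the xor has to move to the VALU. Hypothetical IR of that shape (not taken
from xnor.ll):

  declare i32 @llvm.amdgcn.workitem.id.x()

  define amdgpu_kernel void @xnor_mixed(i32 addrspace(1)* %out, i32 %uniform) {
    %tid = tail call i32 @llvm.amdgcn.workitem.id.x()   ; divergent operand
    %xor = xor i32 %tid, %uniform
    %xnor = xor i32 %xor, -1                            ; xnor(%tid, %uniform)
    %gep = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %tid
    store i32 %xnor, i32 addrspace(1)* %gep, align 4
    ret void
  }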

Differential Revision: https://reviews.llvm.org/D111907
Stanislav Mekhanoshin 2021-10-15 13:07:32 -07:00
parent 605efd5dd5
commit 7cdb1df8c7
3 changed files with 16 additions and 11 deletions

@@ -558,19 +558,19 @@ def S_XNOR_B64 : SOP2_64 <"s_xnor_b64",
 >;
 def S_NAND_B32 : SOP2_32 <"s_nand_b32",
-  [(set i32:$sdst, (not (and_oneuse i32:$src0, i32:$src1)))]
+  [(set i32:$sdst, (UniformUnaryFrag<not> (and_oneuse i32:$src0, i32:$src1)))]
 >;
 def S_NAND_B64 : SOP2_64 <"s_nand_b64",
-  [(set i64:$sdst, (not (and_oneuse i64:$src0, i64:$src1)))]
+  [(set i64:$sdst, (UniformUnaryFrag<not> (and_oneuse i64:$src0, i64:$src1)))]
 >;
 def S_NOR_B32 : SOP2_32 <"s_nor_b32",
-  [(set i32:$sdst, (not (or_oneuse i32:$src0, i32:$src1)))]
+  [(set i32:$sdst, (UniformUnaryFrag<not> (or_oneuse i32:$src0, i32:$src1)))]
 >;
 def S_NOR_B64 : SOP2_64 <"s_nor_b64",
-  [(set i64:$sdst, (not (or_oneuse i64:$src0, i64:$src1)))]
+  [(set i64:$sdst, (UniformUnaryFrag<not> (or_oneuse i64:$src0, i64:$src1)))]
 >;
 } // End isCommutable = 1

@@ -667,6 +667,14 @@ def : ThreeOp_i32_Pats<xor, add, V_XAD_U32_e64>;
 def : VOPBinOpClampPat<saddsat, V_ADD_I32_e64, i32>;
 def : VOPBinOpClampPat<ssubsat, V_SUB_I32_e64, i32>;
+def : GCNPat<(getDivergentFrag<or>.ret (or_oneuse i64:$src0, i64:$src1), i64:$src2),
+  (REG_SEQUENCE VReg_64,
+    (V_OR3_B32_e64 (i32 (EXTRACT_SUBREG $src0, sub0)),
+                   (i32 (EXTRACT_SUBREG $src1, sub0)),
+                   (i32 (EXTRACT_SUBREG $src2, sub0))), sub0,
+    (V_OR3_B32_e64 (i32 (EXTRACT_SUBREG $src0, sub1)),
+                   (i32 (EXTRACT_SUBREG $src1, sub1)),
+                   (i32 (EXTRACT_SUBREG $src2, sub1))), sub1)>;
 // FIXME: Probably should hardcode clamp bit in pseudo and avoid this.
 class OpSelBinOpClampPat<SDPatternOperator node,

@@ -9,8 +9,7 @@ define amdgpu_kernel void @divergent_or3_b32(<3 x i32> addrspace(1)* %arg) {
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN-NEXT: global_load_dwordx3 v[0:2], v3, s[0:1]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_or_b32_e32 v0, v1, v0
-; GCN-NEXT: v_or_b32_e32 v0, v0, v2
+; GCN-NEXT: v_or3_b32 v0, v1, v0, v2
 ; GCN-NEXT: v_not_b32_e32 v0, v0
 ; GCN-NEXT: global_store_dword v3, v0, s[0:1]
 ; GCN-NEXT: s_endpgm
@@ -39,10 +38,8 @@ define amdgpu_kernel void @divergent_or3_b64(<3 x i64> addrspace(1)* %arg) {
 ; GCN-NEXT: global_load_dwordx2 v[4:5], v6, s[0:1] offset:16
 ; GCN-NEXT: global_load_dwordx4 v[0:3], v6, s[0:1]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_or_b32_e32 v1, v3, v1
-; GCN-NEXT: v_or_b32_e32 v0, v2, v0
-; GCN-NEXT: v_or_b32_e32 v0, v0, v4
-; GCN-NEXT: v_or_b32_e32 v1, v1, v5
+; GCN-NEXT: v_or3_b32 v1, v3, v1, v5
+; GCN-NEXT: v_or3_b32 v0, v2, v0, v4
 ; GCN-NEXT: v_not_b32_e32 v0, v0
 ; GCN-NEXT: v_not_b32_e32 v1, v1
 ; GCN-NEXT: global_store_dwordx2 v6, v[0:1], s[0:1]
@@ -103,8 +100,8 @@ define amdgpu_kernel void @divergent_and3_b64(<3 x i64> addrspace(1)* %arg) {
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 ; GCN-NEXT: v_and_b32_e32 v1, v3, v1
 ; GCN-NEXT: v_and_b32_e32 v0, v2, v0
-; GCN-NEXT: v_and_b32_e32 v0, v0, v4
 ; GCN-NEXT: v_and_b32_e32 v1, v1, v5
+; GCN-NEXT: v_and_b32_e32 v0, v0, v4
 ; GCN-NEXT: v_not_b32_e32 v0, v0
 ; GCN-NEXT: v_not_b32_e32 v1, v1
 ; GCN-NEXT: global_store_dwordx2 v6, v[0:1], s[0:1]