diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index ff7c0a87a7f7..5f63970c6697 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -558,19 +558,19 @@ def S_XNOR_B64 : SOP2_64 <"s_xnor_b64",
 >;
 
 def S_NAND_B32 : SOP2_32 <"s_nand_b32",
-  [(set i32:$sdst, (not (and_oneuse i32:$src0, i32:$src1)))]
+  [(set i32:$sdst, (UniformUnaryFrag<not> (and_oneuse i32:$src0, i32:$src1)))]
 >;
 
 def S_NAND_B64 : SOP2_64 <"s_nand_b64",
-  [(set i64:$sdst, (not (and_oneuse i64:$src0, i64:$src1)))]
+  [(set i64:$sdst, (UniformUnaryFrag<not> (and_oneuse i64:$src0, i64:$src1)))]
 >;
 
 def S_NOR_B32 : SOP2_32 <"s_nor_b32",
-  [(set i32:$sdst, (not (or_oneuse i32:$src0, i32:$src1)))]
+  [(set i32:$sdst, (UniformUnaryFrag<not> (or_oneuse i32:$src0, i32:$src1)))]
 >;
 
 def S_NOR_B64 : SOP2_64 <"s_nor_b64",
-  [(set i64:$sdst, (not (or_oneuse i64:$src0, i64:$src1)))]
+  [(set i64:$sdst, (UniformUnaryFrag<not> (or_oneuse i64:$src0, i64:$src1)))]
 >;
 
 } // End isCommutable = 1
diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 4b216a4ec157..8c6d1884c5c5 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -667,6 +667,14 @@ def : ThreeOp_i32_Pats<…>;
 def : VOPBinOpClampPat<saddsat, V_ADD_I32_e64>;
 def : VOPBinOpClampPat<ssubsat, V_SUB_I32_e64>;
 
+def : GCNPat<(getDivergentFrag<or>.ret (or_oneuse i64:$src0, i64:$src1), i64:$src2),
+  (REG_SEQUENCE VReg_64,
+    (V_OR3_B32_e64 (i32 (EXTRACT_SUBREG $src0, sub0)),
+                   (i32 (EXTRACT_SUBREG $src1, sub0)),
+                   (i32 (EXTRACT_SUBREG $src2, sub0))), sub0,
+    (V_OR3_B32_e64 (i32 (EXTRACT_SUBREG $src0, sub1)),
+                   (i32 (EXTRACT_SUBREG $src1, sub1)),
+                   (i32 (EXTRACT_SUBREG $src2, sub1))), sub1)>;
 // FIXME: Probably should hardcode clamp bit in pseudo and avoid this.
 class OpSelBinOpClampPat<SDPatternOperator node,
                          Instruction inst> : GCNPat<
diff --git a/llvm/test/CodeGen/AMDGPU/… b/llvm/test/CodeGen/AMDGPU/…
--- a/llvm/test/CodeGen/AMDGPU/…
+++ b/llvm/test/CodeGen/AMDGPU/…
@@ … @@ define amdgpu_kernel void @divergent_or3_b32(<3 x i32> addrspace(1)* %arg) {
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    global_load_dwordx3 v[0:2], v3, s[0:1]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v0, v1, v0
-; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    v_or3_b32 v0, v1, v0, v2
 ; GCN-NEXT:    v_not_b32_e32 v0, v0
 ; GCN-NEXT:    global_store_dword v3, v0, s[0:1]
 ; GCN-NEXT:    s_endpgm
@@ -39,10 +38,8 @@ define amdgpu_kernel void @divergent_or3_b64(<3 x i64> addrspace(1)* %arg) {
 ; GCN-NEXT:    global_load_dwordx2 v[4:5], v6, s[0:1] offset:16
 ; GCN-NEXT:    global_load_dwordx4 v[0:3], v6, s[0:1]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-NEXT:    v_or_b32_e32 v0, v2, v0
-; GCN-NEXT:    v_or_b32_e32 v0, v0, v4
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v5
+; GCN-NEXT:    v_or3_b32 v1, v3, v1, v5
+; GCN-NEXT:    v_or3_b32 v0, v2, v0, v4
 ; GCN-NEXT:    v_not_b32_e32 v0, v0
 ; GCN-NEXT:    v_not_b32_e32 v1, v1
 ; GCN-NEXT:    global_store_dwordx2 v6, v[0:1], s[0:1]
@@ -103,8 +100,8 @@ define amdgpu_kernel void @divergent_and3_b64(<3 x i64> addrspace(1)* %arg) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, v3, v1
 ; GCN-NEXT:    v_and_b32_e32 v0, v2, v0
-; GCN-NEXT:    v_and_b32_e32 v0, v0, v4
 ; GCN-NEXT:    v_and_b32_e32 v1, v1, v5
+; GCN-NEXT:    v_and_b32_e32 v0, v0, v4
 ; GCN-NEXT:    v_not_b32_e32 v0, v0
 ; GCN-NEXT:    v_not_b32_e32 v1, v1
 ; GCN-NEXT:    global_store_dwordx2 v6, v[0:1], s[0:1]
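Note on the pattern-fragment helpers referenced above (not part of the patch): UniformUnaryFrag and getDivergentFrag are PatFrag wrappers from SIInstructions.td whose predicates check SDNode divergence, so uniform operations keep selecting SALU instructions while divergent ones fall through to VALU patterns such as V_OR3_B32. The in-tree definitions differ in detail; the following is a minimal sketch of the idea only, and DivergentBinFragSketch is a hypothetical name chosen here to avoid clashing with the real helpers:

// Sketch only: match (Op $src0) just when the DAG node is uniform,
// i.e. when it is safe to select a scalar (SALU) instruction for it.
class UniformUnaryFrag<SDPatternOperator Op> : PatFrag <
  (ops node:$src0),
  (Op $src0),
  [{ return !N->isDivergent(); }]
>;

// Sketch only (hypothetical name): the binary counterpart, firing
// only for divergent nodes, which must go to VALU instructions.
class DivergentBinFragSketch<SDPatternOperator Op> : PatFrag <
  (ops node:$src0, node:$src1),
  (Op $src0, $src1),
  [{ return N->isDivergent(); }]
>;

With the S_NAND/S_NOR patterns guarded by UniformUnaryFrag<not>, a divergent not-of-or no longer matches the scalar NOR pattern during selection (previously it was selected scalar and later expanded by the SGPR-to-VGPR fixup into separate v_or/v_not). The (or (or a, b), c) shape therefore survives for the V_OR3_B32 patterns, which is what the updated GCN check lines verify.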