[AMDGPU] simplify add x, *ext (setcc) => addc|subb x, 0, setcc

This simplification avoids generating a v_cndmask_b32 just to
serialize the condition code between the compare and its use.
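
For intuition, an example added here rather than in the original message: zext of an i1 condition is 0 or 1, so add x, zext(cc) equals addcarry x, 0, cc; sext of an i1 is 0 or -1, so add x, sext(cc) equals x - cc, i.e. subcarry x, 0, cc. A small standalone C++ check of the 32-bit identity (illustrative values only, not from the commit):

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t x : {0u, 1u, 0x7fffffffu, 0xffffffffu}) {
    for (bool cc : {false, true}) {
      uint32_t zext = cc ? 1u : 0u;          // zext i1 -> i32
      uint32_t sext = cc ? 0xffffffffu : 0u; // sext i1 -> i32, i.e. 0 or -1
      assert(x + zext == x + 0u + (cc ? 1u : 0u)); // addcarry x, 0, cc
      assert(x + sext == x - 0u - (cc ? 1u : 0u)); // subcarry x, 0, cc
    }
  }
  return 0;
}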

Differential Revision: https://reviews.llvm.org/D34300

llvm-svn: 305962
Stanislav Mekhanoshin 2017-06-21 22:05:06 +00:00
parent 1b587358be
commit e3eb42cef6
5 changed files with 102 additions and 0 deletions

View File

@@ -70,6 +70,10 @@ def AMDGPUElseBreakOp : SDTypeProfile<1, 2,
[SDTCisVT<0, i64>, SDTCisVT<1, i64>, SDTCisVT<2, i64>]
>;
def AMDGPUAddeSubeOp : SDTypeProfile<2, 3,
[SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisVT<0, i32>, SDTCisVT<1, i1>, SDTCisVT<4, i1>]
>;
//===----------------------------------------------------------------------===//
// AMDGPU DAG Nodes
//
@@ -179,6 +183,12 @@ def AMDGPUcarry : SDNode<"AMDGPUISD::CARRY", SDTIntBinOp, []>;
// out = (src1 > src0) ? 1 : 0
def AMDGPUborrow : SDNode<"AMDGPUISD::BORROW", SDTIntBinOp, []>;
// TODO: remove AMDGPUadde/AMDGPUsube when ADDCARRY/SUBCARRY get their own
// nodes in TargetSelectionDAG.td.
def AMDGPUadde : SDNode<"ISD::ADDCARRY", AMDGPUAddeSubeOp, []>;
def AMDGPUsube : SDNode<"ISD::SUBCARRY", AMDGPUAddeSubeOp, []>;
def AMDGPUSetCCOp : SDTypeProfile<1, 3, [ // setcc
SDTCisVT<0, i64>, SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>
]>;
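
Side note added for this write-up (not in the commit): SDTypeProfile<2, 3> declares two results and three operands, and the constraint indices count results first, then operands. So result 0 (the i32 sum) must match operands 2 and 3 (the two i32 addends), while result 1 and operand 4 (carry-out and carry-in) are i1. A minimal hedged C++ sketch of a node with that shape, with DAG, DL, X, Y and CarryIn as assumed placeholders:

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Illustrative only: builds an ADDCARRY node matching the AMDGPUAddeSubeOp
// profile -- results {i32, i1}, operands {i32, i32, i1}.
static SDValue makeAddCarry(SelectionDAG &DAG, const SDLoc &DL,
                            SDValue X, SDValue Y, SDValue CarryIn) {
  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i1); // sum, carry-out
  return DAG.getNode(ISD::ADDCARRY, DL, VTs, X, Y, CarryIn);
}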

View File

@@ -211,6 +211,9 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::UADDO, MVT::i32, Legal);
setOperationAction(ISD::USUBO, MVT::i32, Legal);
setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);
// We only support LOAD/STORE and vector manipulation ops for vectors
// with > 4 elements.
for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
@@ -471,6 +474,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SELECT, VT, Custom);
}
setTargetDAGCombine(ISD::ADD);
setTargetDAGCombine(ISD::FADD);
setTargetDAGCombine(ISD::FSUB);
setTargetDAGCombine(ISD::FMINNUM);
@@ -4839,6 +4843,39 @@ unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
  return 0;
}

SDValue SITargetLowering::performAddCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);

  if (VT != MVT::i32)
    return SDValue();

  SDLoc SL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // add x, zext (setcc) => addcarry x, 0, setcc
  // add x, sext (setcc) => subcarry x, 0, setcc
  unsigned Opc = LHS.getOpcode();
  if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
      Opc == ISD::ANY_EXTEND)
    std::swap(RHS, LHS);

  Opc = RHS.getOpcode();
  if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
      Opc == ISD::ANY_EXTEND) {
    auto Cond = RHS.getOperand(0);
    if (Cond.getOpcode() == ISD::SETCC) {
      SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
      SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
      Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
      return DAG.getNode(Opc, SL, VTList, Args);
    }
  }
  return SDValue();
}

SDValue SITargetLowering::performFAddCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
@@ -5009,6 +5046,8 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
  switch (N->getOpcode()) {
  default:
    return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
  case ISD::ADD:
    return performAddCombine(N, DCI);
  case ISD::FADD:
    return performFAddCombine(N, DCI);
  case ISD::FSUB:
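
A note added for this write-up, not part of the diff: getNode called with a two-result VTList hands back result 0 of the new node, so the value the combine returns (the i32 sum) is what replaces the original ADD; the i1 carry-out exists as result 1 but is simply left without users here. A hedged sketch of that, reusing the names LHS, SL and Cond from performAddCombine above:

// Sketch only: mirrors what the combine returns; not code from the commit.
SDValue Sum = DAG.getNode(ISD::ADDCARRY, SL,
                          DAG.getVTList(MVT::i32, MVT::i1), // {i32 sum, i1 carry-out}
                          LHS, DAG.getConstant(0, SL, MVT::i32), Cond);
SDValue CarryOut = Sum.getValue(1); // result 1; has no users after the combine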

View File

@@ -108,6 +108,7 @@ class SITargetLowering final : public AMDGPUTargetLowering {
unsigned getFusedOpcode(const SelectionDAG &DAG,
const SDNode *N0, const SDNode *N1) const;
SDValue performAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performFAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performFSubCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performSetCCCombine(SDNode *N, DAGCombinerInfo &DCI) const;

View File

@@ -392,6 +392,15 @@ defm V_CVT_PK_I16_I32 : VOP2Inst <"v_cvt_pk_i16_i32", VOP_I32_I32_I32>;
} // End SubtargetPredicate = isGCN

def : Pat<
  (AMDGPUadde i32:$src0, i32:$src1, i1:$src2),
  (V_ADDC_U32_e64 $src0, $src1, $src2)
>;

def : Pat<
  (AMDGPUsube i32:$src0, i32:$src1, i1:$src2),
  (V_SUBB_U32_e64 $src0, $src1, $src2)
>;
// These instructions only exist on SI and CI
let SubtargetPredicate = isSICI in {

View File

@@ -0,0 +1,43 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; GCN-LABEL: {{^}}add1:
; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, 0, v{{[0-9]+}}, [[CC]]
; GCN-NOT: v_cndmask
define amdgpu_kernel void @add1(i32 addrspace(1)* nocapture %arg) {
bb:
%x = tail call i32 @llvm.amdgcn.workitem.id.x()
%y = tail call i32 @llvm.amdgcn.workitem.id.y()
%gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
%v = load i32, i32 addrspace(1)* %gep, align 4
%cmp = icmp ugt i32 %x, %y
%ext = zext i1 %cmp to i32
%add = add i32 %v, %ext
store i32 %add, i32 addrspace(1)* %gep, align 4
ret void
}
; GCN-LABEL: {{^}}sub1:
; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_subb_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, 0, [[CC]]
; GCN-NOT: v_cndmask
define amdgpu_kernel void @sub1(i32 addrspace(1)* nocapture %arg) {
bb:
%x = tail call i32 @llvm.amdgcn.workitem.id.x()
%y = tail call i32 @llvm.amdgcn.workitem.id.y()
%gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
%v = load i32, i32 addrspace(1)* %gep, align 4
%cmp = icmp ugt i32 %x, %y
%ext = sext i1 %cmp to i32
%add = add i32 %v, %ext
store i32 %add, i32 addrspace(1)* %gep, align 4
ret void
}
declare i32 @llvm.amdgcn.workitem.id.x() #0
declare i32 @llvm.amdgcn.workitem.id.y() #0
attributes #0 = { nounwind readnone speculatable }