[AMDGPU] New llvm.amdgcn.ballot intrinsic
Add a new llvm.amdgcn.ballot intrinsic modeled on the ballot function in GLSL and other shader languages. It returns a bitfield containing the result of its boolean argument in all active lanes, and zero in all inactive lanes.

This is intended to replace the existing llvm.amdgcn.icmp and llvm.amdgcn.fcmp intrinsics after a suitable transition period. Use the new intrinsic in the atomic optimizer pass.

Differential Revision: https://reviews.llvm.org/D65088
commit 5d3a69feca
parent 0b6f40da45
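As a quick orientation before the diff, here is a minimal IR sketch of the intended front-end usage. The function below is hypothetical and only for illustration; the intrinsic declaration matches the tests added by this patch.

declare i64 @llvm.amdgcn.ballot.i64(i1)

define i64 @active_lanes_where_gt(i32 %x, i32 %y) {
  ; Bit N of %mask is set iff lane N is active and %cmp is true in that lane;
  ; bits for inactive lanes are zero. Wave32 targets would use the i32
  ; overload, llvm.amdgcn.ballot.i32, instead.
  %cmp = icmp sgt i32 %x, %y
  %mask = call i64 @llvm.amdgcn.ballot.i64(i1 %cmp)
  ret i64 %mask
}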
@@ -1348,6 +1348,9 @@ def int_amdgcn_fcmp :
  Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>, llvm_i32_ty],
            [IntrNoMem, IntrConvergent, ImmArg<2>]>;

def int_amdgcn_ballot :
  Intrinsic<[llvm_anyint_ty], [llvm_i1_ty], [IntrNoMem, IntrConvergent]>;

def int_amdgcn_readfirstlane :
  GCCBuiltin<"__builtin_amdgcn_readfirstlane">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
@@ -447,9 +447,8 @@ void AMDGPUAtomicOptimizer::optimizeAtomic(Instruction &I,
  // We need to know how many lanes are active within the wavefront, and we do
  // this by doing a ballot of active lanes.
  Type *const WaveTy = B.getIntNTy(ST->getWavefrontSize());
  CallInst *const Ballot = B.CreateIntrinsic(
      Intrinsic::amdgcn_icmp, {WaveTy, B.getInt32Ty()},
      {B.getInt32(1), B.getInt32(0), B.getInt32(CmpInst::ICMP_NE)});
  CallInst *const Ballot =
      B.CreateIntrinsic(Intrinsic::amdgcn_ballot, WaveTy, B.getTrue());

  // We need to know how many lanes are active within the wavefront that are
  // below us. If we counted each lane linearly starting from 0, a lane is
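In IR terms, the optimizer's "ballot of all active lanes" idiom changes roughly as sketched below (an illustrative snippet for a wave64 target, not taken verbatim from the pass output):

; Before this patch the pass emitted an always-true wave-wide compare through
; llvm.amdgcn.icmp (operands 1 and 0 with an ICMP_NE predicate). With the
; patch it simply ballots a constant-true condition:
  %active = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; %active now has one bit set for every lane that is currently active.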
@@ -804,6 +804,7 @@ bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
  case Intrinsic::amdgcn_readlane:
  case Intrinsic::amdgcn_icmp:
  case Intrinsic::amdgcn_fcmp:
  case Intrinsic::amdgcn_ballot:
  case Intrinsic::amdgcn_if_break:
    return true;
  }
@@ -4264,6 +4264,43 @@ static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
  return DAG.getZExtOrTrunc(SetCC, SL, VT);
}

static SDValue lowerBALLOTIntrinsic(const SITargetLowering &TLI, SDNode *N,
                                    SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  SDValue Src = N->getOperand(1);
  SDLoc SL(N);

  if (Src.getOpcode() == ISD::SETCC) {
    // (ballot (ISD::SETCC ...)) -> (AMDGPUISD::SETCC ...)
    return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src.getOperand(0),
                       Src.getOperand(1), Src.getOperand(2));
  }
  if (const ConstantSDNode *Arg = dyn_cast<ConstantSDNode>(Src)) {
    // (ballot 0) -> 0
    if (Arg->isNullValue())
      return DAG.getConstant(0, SL, VT);

    // (ballot 1) -> EXEC/EXEC_LO
    if (Arg->isOne()) {
      Register Exec;
      if (VT.getScalarSizeInBits() == 32)
        Exec = AMDGPU::EXEC_LO;
      else if (VT.getScalarSizeInBits() == 64)
        Exec = AMDGPU::EXEC;
      else
        return SDValue();

      return DAG.getCopyFromReg(DAG.getEntryNode(), SL, Exec, VT);
    }
  }

  // (ballot (i1 $src)) -> (AMDGPUISD::SETCC (i32 (zext $src)) (i32 0)
  //                        ISD::SETNE)
  return DAG.getNode(
      AMDGPUISD::SETCC, SL, VT, DAG.getZExtOrTrunc(Src, SL, MVT::i32),
      DAG.getConstant(0, SL, MVT::i32), DAG.getCondCode(ISD::SETNE));
}

void SITargetLowering::ReplaceNodeResults(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG) const {

@@ -5982,6 +6019,8 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
  case Intrinsic::amdgcn_fcmp: {
    return lowerFCMPIntrinsic(*this, Op.getNode(), DAG);
  }
  case Intrinsic::amdgcn_ballot:
    return lowerBALLOTIntrinsic(*this, Op.getNode(), DAG);
  case Intrinsic::amdgcn_fmed3:
    return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
@@ -3955,6 +3955,35 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {

    break;
  }
  case Intrinsic::amdgcn_ballot: {
    if (auto *Src = dyn_cast<ConstantInt>(II->getArgOperand(0))) {
      if (Src->isZero()) {
        // amdgcn.ballot(i1 0) is zero.
        return replaceInstUsesWith(*II, Constant::getNullValue(II->getType()));
      }

      if (Src->isOne()) {
        // amdgcn.ballot(i1 1) is exec.
        const char *RegName = "exec";
        if (II->getType()->isIntegerTy(32))
          RegName = "exec_lo";
        else if (!II->getType()->isIntegerTy(64))
          break;

        Function *NewF = Intrinsic::getDeclaration(
            II->getModule(), Intrinsic::read_register, II->getType());
        Metadata *MDArgs[] = {MDString::get(II->getContext(), RegName)};
        MDNode *MD = MDNode::get(II->getContext(), MDArgs);
        Value *Args[] = {MetadataAsValue::get(II->getContext(), MD)};
        CallInst *NewCall = Builder.CreateCall(NewF, Args);
        NewCall->addAttribute(AttributeList::FunctionIndex,
                              Attribute::Convergent);
        NewCall->takeName(II);
        return replaceInstUsesWith(*II, NewCall);
      }
    }
    break;
  }
  case Intrinsic::amdgcn_wqm_vote: {
    // wqm_vote is identity when the argument is constant.
    if (!isa<Constant>(II->getArgOperand(0)))
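The InstCombine test added at the end of this diff exercises exactly the constant folds implemented in the amdgcn_ballot case above. In shorthand (a sketch using the wave64 and wave32 overloads, not copied from the test):

  %a = call i64 @llvm.amdgcn.ballot.i64(i1 0)     ; folds to i64 0
  %b = call i64 @llvm.amdgcn.ballot.i64(i1 1)     ; folds to call i64 @llvm.read_register.i64(metadata !"exec")
  %c = call i32 @llvm.amdgcn.ballot.i32(i1 1)     ; folds to call i32 @llvm.read_register.i32(metadata !"exec_lo")
  %d = call i64 @llvm.amdgcn.ballot.i64(i1 %cond) ; non-constant argument: left unchanged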
@@ -4179,7 +4208,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
      return replaceOperand(*II, 2,
                            ConstantInt::get(OpIntTy, GCR.getBasePtrIndex()));
    }

    // Translate facts known about a pointer before relocating into
    // facts about the relocate value, while being careful to
    // preserve relocation semantics.
@@ -23,6 +23,13 @@ define amdgpu_kernel void @fcmp(float inreg %x, float inreg %y) {
  ret void
}

; CHECK-LABEL: for function 'ballot':
define amdgpu_kernel void @ballot(i1 inreg %x) {
; CHECK-NOT: DIVERGENT: %ballot = call i64 @llvm.amdgcn.ballot.i64
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %x)
  ret void
}

; SGPR asm outputs are uniform regardless of the input operands.
; CHECK-LABEL: for function 'asm_sgpr':
; CHECK: DIVERGENT: i32 %divergent

@@ -49,6 +56,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #0
declare i32 @llvm.amdgcn.readfirstlane(i32) #0
declare i64 @llvm.amdgcn.icmp.i32(i32, i32, i32) #1
declare i64 @llvm.amdgcn.fcmp.i32(float, float, i32) #1
declare i64 @llvm.amdgcn.ballot.i64(i1) #1

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind readnone convergent }
@@ -9,14 +9,15 @@ declare i32 @llvm.amdgcn.raw.buffer.atomic.add(i32, <4 x i32>, i32, i32, i32 imm
declare i32 @llvm.amdgcn.struct.buffer.atomic.add(i32, <4 x i32>, i32, i32, i32, i32 immarg)
declare i32 @llvm.amdgcn.raw.buffer.atomic.sub(i32, <4 x i32>, i32, i32, i32 immarg)

; Show that what the atomic optimization pass will do for raw buffers.
; Show what the atomic optimization pass will do for raw buffers.

; GCN-LABEL: add_i32_constant:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5

@@ -29,11 +30,12 @@ entry:
}

; GCN-LABEL: add_i32_uniform:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: s_mul_i32 s[[scalar_value:[0-9]+]], s{{[0-9]+}}, s[[popcount]]

@@ -108,11 +110,12 @@ entry:
}

; GCN-LABEL: sub_i32_constant:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5

@@ -125,11 +128,12 @@ entry:
}

; GCN-LABEL: sub_i32_uniform:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: s_mul_i32 s[[scalar_value:[0-9]+]], s{{[0-9]+}}, s[[popcount]]
@@ -6,14 +6,15 @@

declare i32 @llvm.amdgcn.workitem.id.x()

; Show that what the atomic optimization pass will do for global pointers.
; Show what the atomic optimization pass will do for global pointers.

; GCN-LABEL: add_i32_constant:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5

@@ -26,11 +27,12 @@ entry:
}

; GCN-LABEL: add_i32_uniform:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: s_mul_i32 s[[scalar_value:[0-9]+]], s{{[0-9]+}}, s[[popcount]]

@@ -63,11 +65,12 @@ entry:
}

; GCN-LABEL: add_i64_constant:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: v_mul_hi_u32_u24{{(_e[0-9]+)?}} v[[value_hi:[0-9]+]], s[[popcount]], 5

@@ -81,13 +84,14 @@ entry:
}

; GCN-LABEL: add_i64_uniform:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN32: s_bcnt1_i32_b32 s{{[0-9]+}}, s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s{{[0-9]+}}, s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: {{flat|buffer|global}}_atomic_add_x2 v{{\[}}{{[0-9]+}}:{{[0-9]+}}{{\]}}
define amdgpu_kernel void @add_i64_uniform(i64 addrspace(1)* %out, i64 addrspace(1)* %inout, i64 %additive) {
entry:

@@ -111,11 +115,12 @@ entry:
}

; GCN-LABEL: sub_i32_constant:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5

@@ -128,11 +133,12 @@ entry:
}

; GCN-LABEL: sub_i32_uniform:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: s_mul_i32 s[[scalar_value:[0-9]+]], s{{[0-9]+}}, s[[popcount]]

@@ -165,11 +171,12 @@ entry:
}

; GCN-LABEL: sub_i64_constant:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: v_mul_hi_u32_u24{{(_e[0-9]+)?}} v[[value_hi:[0-9]+]], s[[popcount]], 5

@@ -183,13 +190,14 @@ entry:
}

; GCN-LABEL: sub_i64_uniform:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN32: s_bcnt1_i32_b32 s{{[0-9]+}}, s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s{{[0-9]+}}, s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: {{flat|buffer|global}}_atomic_sub_x2 v{{\[}}{{[0-9]+}}:{{[0-9]+}}{{\]}}
define amdgpu_kernel void @sub_i64_uniform(i64 addrspace(1)* %out, i64 addrspace(1)* %inout, i64 %subitive) {
entry:
File diff suppressed because it is too large
@@ -19,7 +19,7 @@ define amdgpu_ps void @add_i32_constant(<4 x i32> inreg %out, <4 x i32> inreg %i
; GFX7-NEXT: s_and_saveexec_b64 s[8:9], s[10:11]
; GFX7-NEXT: s_cbranch_execz BB0_4
; GFX7-NEXT: ; %bb.1:
; GFX7-NEXT: v_cmp_ne_u32_e64 s[12:13], 1, 0
; GFX7-NEXT: s_mov_b64 s[12:13], exec
; GFX7-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s12, 0
; GFX7-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s13, v0
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0

@@ -52,7 +52,7 @@ define amdgpu_ps void @add_i32_constant(<4 x i32> inreg %out, <4 x i32> inreg %i
; GFX8-NEXT: s_and_saveexec_b64 s[8:9], s[10:11]
; GFX8-NEXT: s_cbranch_execz BB0_4
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: v_cmp_ne_u32_e64 s[12:13], 1, 0
; GFX8-NEXT: s_mov_b64 s[12:13], exec
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, s12, 0
; GFX8-NEXT: v_mbcnt_hi_u32_b32 v0, s13, v0
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0

@@ -85,7 +85,7 @@ define amdgpu_ps void @add_i32_constant(<4 x i32> inreg %out, <4 x i32> inreg %i
; GFX9-NEXT: s_and_saveexec_b64 s[8:9], s[10:11]
; GFX9-NEXT: s_cbranch_execz BB0_4
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: v_cmp_ne_u32_e64 s[12:13], 1, 0
; GFX9-NEXT: s_mov_b64 s[12:13], exec
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s12, 0
; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s13, v0
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0

@@ -118,7 +118,7 @@ define amdgpu_ps void @add_i32_constant(<4 x i32> inreg %out, <4 x i32> inreg %i
; GFX1064-NEXT: s_and_saveexec_b64 s[8:9], s[10:11]
; GFX1064-NEXT: s_cbranch_execz BB0_4
; GFX1064-NEXT: ; %bb.1:
; GFX1064-NEXT: v_cmp_ne_u32_e64 s[12:13], 1, 0
; GFX1064-NEXT: s_mov_b64 s[12:13], exec
; GFX1064-NEXT: ; implicit-def: $vgpr1
; GFX1064-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s12, 0
; GFX1064-NEXT: v_mbcnt_hi_u32_b32_e64 v0, s13, v0

@@ -153,7 +153,7 @@ define amdgpu_ps void @add_i32_constant(<4 x i32> inreg %out, <4 x i32> inreg %i
; GFX1032-NEXT: s_and_saveexec_b32 s8, s9
; GFX1032-NEXT: s_cbranch_execz BB0_4
; GFX1032-NEXT: ; %bb.1:
; GFX1032-NEXT: v_cmp_ne_u32_e64 s10, 1, 0
; GFX1032-NEXT: s_mov_b32 s10, exec_lo
; GFX1032-NEXT: ; implicit-def: $vgpr1
; GFX1032-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s10, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0

@@ -213,10 +213,10 @@ define amdgpu_ps void @add_i32_varying(<4 x i32> inreg %out, <4 x i32> inreg %in
; GFX8-NEXT: s_and_saveexec_b64 s[8:9], s[10:11]
; GFX8-NEXT: s_cbranch_execz BB1_4
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_or_saveexec_b64 s[10:11], -1
; GFX8-NEXT: s_mov_b64 s[10:11], exec
; GFX8-NEXT: s_or_saveexec_b64 s[12:13], -1
; GFX8-NEXT: v_mov_b32_e32 v1, 0
; GFX8-NEXT: s_mov_b64 exec, s[10:11]
; GFX8-NEXT: v_cmp_ne_u32_e64 s[10:11], 1, 0
; GFX8-NEXT: s_mov_b64 exec, s[12:13]
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, s10, 0
; GFX8-NEXT: v_mbcnt_hi_u32_b32 v0, s11, v0
; GFX8-NEXT: s_not_b64 exec, exec

@@ -270,10 +270,10 @@ define amdgpu_ps void @add_i32_varying(<4 x i32> inreg %out, <4 x i32> inreg %in
; GFX9-NEXT: s_and_saveexec_b64 s[8:9], s[10:11]
; GFX9-NEXT: s_cbranch_execz BB1_4
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_or_saveexec_b64 s[10:11], -1
; GFX9-NEXT: s_mov_b64 s[10:11], exec
; GFX9-NEXT: s_or_saveexec_b64 s[12:13], -1
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_mov_b64 exec, s[10:11]
; GFX9-NEXT: v_cmp_ne_u32_e64 s[10:11], 1, 0
; GFX9-NEXT: s_mov_b64 exec, s[12:13]
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s10, 0
; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s11, v0
; GFX9-NEXT: s_not_b64 exec, exec

@@ -327,10 +327,10 @@ define amdgpu_ps void @add_i32_varying(<4 x i32> inreg %out, <4 x i32> inreg %in
; GFX1064-NEXT: s_and_saveexec_b64 s[8:9], s[10:11]
; GFX1064-NEXT: s_cbranch_execz BB1_4
; GFX1064-NEXT: ; %bb.1:
; GFX1064-NEXT: s_or_saveexec_b64 s[10:11], -1
; GFX1064-NEXT: s_mov_b64 s[10:11], exec
; GFX1064-NEXT: s_or_saveexec_b64 s[12:13], -1
; GFX1064-NEXT: v_mov_b32_e32 v1, 0
; GFX1064-NEXT: s_mov_b64 exec, s[10:11]
; GFX1064-NEXT: v_cmp_ne_u32_e64 s[10:11], 1, 0
; GFX1064-NEXT: s_mov_b64 exec, s[12:13]
; GFX1064-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s10, 0
; GFX1064-NEXT: v_mbcnt_hi_u32_b32_e64 v0, s11, v0
; GFX1064-NEXT: s_not_b64 exec, exec

@@ -389,10 +389,10 @@ define amdgpu_ps void @add_i32_varying(<4 x i32> inreg %out, <4 x i32> inreg %in
; GFX1032-NEXT: s_and_saveexec_b32 s8, s9
; GFX1032-NEXT: s_cbranch_execz BB1_4
; GFX1032-NEXT: ; %bb.1:
; GFX1032-NEXT: s_or_saveexec_b32 s9, -1
; GFX1032-NEXT: s_mov_b32 s9, exec_lo
; GFX1032-NEXT: s_or_saveexec_b32 s10, -1
; GFX1032-NEXT: v_mov_b32_e32 v1, 0
; GFX1032-NEXT: s_mov_b32 exec_lo, s9
; GFX1032-NEXT: v_cmp_ne_u32_e64 s9, 1, 0
; GFX1032-NEXT: s_mov_b32 exec_lo, s10
; GFX1032-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s9, 0
; GFX1032-NEXT: s_not_b32 exec_lo, exec_lo
; GFX1032-NEXT: v_mov_b32_e32 v2, 0
@@ -8,14 +8,15 @@ declare i32 @llvm.amdgcn.workitem.id.x()
declare i32 @llvm.amdgcn.raw.buffer.atomic.add(i32, <4 x i32>, i32, i32, i32)
declare i32 @llvm.amdgcn.raw.buffer.atomic.sub(i32, <4 x i32>, i32, i32, i32)

; Show that what the atomic optimization pass will do for raw buffers.
; Show what the atomic optimization pass will do for raw buffers.

; GCN-LABEL: add_i32_constant:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5

@@ -28,11 +29,12 @@ entry:
}

; GCN-LABEL: add_i32_uniform:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: s_mul_i32 s[[scalar_value:[0-9]+]], s{{[0-9]+}}, s[[popcount]]

@@ -78,11 +80,12 @@ entry:
}

; GCN-LABEL: sub_i32_constant:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5

@@ -95,11 +98,12 @@ entry:
}

; GCN-LABEL: sub_i32_uniform:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: s_mul_i32 s[[scalar_value:[0-9]+]], s{{[0-9]+}}, s[[popcount]]
@@ -8,14 +8,15 @@ declare i32 @llvm.amdgcn.workitem.id.x()
declare i32 @llvm.amdgcn.struct.buffer.atomic.add(i32, <4 x i32>, i32, i32, i32, i32)
declare i32 @llvm.amdgcn.struct.buffer.atomic.sub(i32, <4 x i32>, i32, i32, i32, i32)

; Show that what the atomic optimization pass will do for struct buffers.
; Show what the atomic optimization pass will do for struct buffers.

; GCN-LABEL: add_i32_constant:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5

@@ -28,11 +29,12 @@ entry:
}

; GCN-LABEL: add_i32_uniform:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: s_mul_i32 s[[scalar_value:[0-9]+]], s{{[0-9]+}}, s[[popcount]]

@@ -91,11 +93,12 @@ entry:
}

; GCN-LABEL: sub_i32_constant:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5

@@ -108,11 +111,12 @@ entry:
}

; GCN-LABEL: sub_i32_uniform:
; GCN32: v_cmp_ne_u32_e64 s[[exec_lo:[0-9]+]], 1, 0
; GCN64: v_cmp_ne_u32_e64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, 1, 0
; GCN32: s_mov_b32 s[[exec_lo:[0-9]+]], exec_lo
; GCN64: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt:[0-9]+]], s[[exec_lo]], 0
; GCN64: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt]], s[[exec_hi]], v[[mbcnt]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc{{(_lo)?}}, 0, v[[mbcnt]]
; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: s_mul_i32 s[[scalar_value:[0-9]+]], s{{[0-9]+}}, s[[popcount]]
@@ -0,0 +1,93 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck %s

declare i32 @llvm.amdgcn.ballot.i32(i1)

; Test ballot(0)

define i32 @test0() {
; CHECK-LABEL: test0:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_waitcnt_vscnt null, 0x0
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: ; implicit-def: $vcc_hi
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 0)
  ret i32 %ballot
}

; Test ballot(1)

define i32 @test1() {
; CHECK-LABEL: test1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_waitcnt_vscnt null, 0x0
; CHECK-NEXT: v_mov_b32_e32 v0, exec_lo
; CHECK-NEXT: ; implicit-def: $vcc_hi
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 1)
  ret i32 %ballot
}

; Test ballot of a non-comparison operation

define i32 @test2(i32 %x) {
; CHECK-LABEL: test2:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_waitcnt_vscnt null, 0x0
; CHECK-NEXT: v_and_b32_e32 v0, 1, v0
; CHECK-NEXT: ; implicit-def: $vcc_hi
; CHECK-NEXT: v_cmp_ne_u32_e64 s4, 0, v0
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %trunc = trunc i32 %x to i1
  %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %trunc)
  ret i32 %ballot
}

; Test ballot of comparisons

define i32 @test3(i32 %x, i32 %y) {
; CHECK-LABEL: test3:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_waitcnt_vscnt null, 0x0
; CHECK-NEXT: v_cmp_eq_u32_e64 s4, v0, v1
; CHECK-NEXT: ; implicit-def: $vcc_hi
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %cmp = icmp eq i32 %x, %y
  %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %cmp)
  ret i32 %ballot
}

define i32 @test4(i32 %x) {
; CHECK-LABEL: test4:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_waitcnt_vscnt null, 0x0
; CHECK-NEXT: v_cmp_lt_i32_e64 s4, 0x62, v0
; CHECK-NEXT: ; implicit-def: $vcc_hi
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %cmp = icmp sge i32 %x, 99
  %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %cmp)
  ret i32 %ballot
}

define i32 @test5(float %x, float %y) {
; CHECK-LABEL: test5:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_waitcnt_vscnt null, 0x0
; CHECK-NEXT: v_cmp_gt_f32_e64 s4, v0, v1
; CHECK-NEXT: ; implicit-def: $vcc_hi
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %cmp = fcmp ogt float %x, %y
  %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %cmp)
  ret i32 %ballot
}
@@ -0,0 +1,88 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=gfx900 < %s | FileCheck %s

declare i64 @llvm.amdgcn.ballot.i64(i1)

; Test ballot(0)

define i64 @test0() {
; CHECK-LABEL: test0:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 0)
  ret i64 %ballot
}

; Test ballot(1)

define i64 @test1() {
; CHECK-LABEL: test1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v0, exec_lo
; CHECK-NEXT: v_mov_b32_e32 v1, exec_hi
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 1)
  ret i64 %ballot
}

; Test ballot of a non-comparison operation

define i64 @test2(i32 %x) {
; CHECK-LABEL: test2:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_and_b32_e32 v0, 1, v0
; CHECK-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v0
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: v_mov_b32_e32 v1, s5
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %trunc = trunc i32 %x to i1
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %trunc)
  ret i64 %ballot
}

; Test ballot of comparisons

define i64 @test3(i32 %x, i32 %y) {
; CHECK-LABEL: test3:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], v0, v1
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: v_mov_b32_e32 v1, s5
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %cmp = icmp eq i32 %x, %y
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %cmp)
  ret i64 %ballot
}

define i64 @test4(i32 %x) {
; CHECK-LABEL: test4:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_movk_i32 s4, 0x62
; CHECK-NEXT: v_cmp_lt_i32_e64 s[4:5], s4, v0
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: v_mov_b32_e32 v1, s5
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %cmp = icmp sge i32 %x, 99
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %cmp)
  ret i64 %ballot
}

define i64 @test5(float %x, float %y) {
; CHECK-LABEL: test5:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_cmp_gt_f32_e64 s[4:5], v0, v1
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: v_mov_b32_e32 v1, s5
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %cmp = fcmp ogt float %x, %y
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %cmp)
  ret i64 %ballot
}
@@ -2378,6 +2378,65 @@ define i64 @fcmp_constant_to_rhs_olt(float %x) {
  ret i64 %result
}

; --------------------------------------------------------------------
; llvm.amdgcn.ballot
; --------------------------------------------------------------------

declare i64 @llvm.amdgcn.ballot.i64(i1) nounwind readnone convergent
declare i32 @llvm.amdgcn.ballot.i32(i1) nounwind readnone convergent

define i64 @ballot_nocombine_64(i1 %i) {
; CHECK-LABEL: @ballot_nocombine_64(
; CHECK-NEXT: %b = call i64 @llvm.amdgcn.ballot.i64(i1 %i)
; CHECK-NEXT: ret i64 %b
;
  %b = call i64 @llvm.amdgcn.ballot.i64(i1 %i)
  ret i64 %b
}

define i64 @ballot_zero_64() {
; CHECK-LABEL: @ballot_zero_64(
; CHECK-NEXT: ret i64 0
;
  %b = call i64 @llvm.amdgcn.ballot.i64(i1 0)
  ret i64 %b
}

define i64 @ballot_one_64() {
; CHECK-LABEL: @ballot_one_64(
; CHECK-NEXT: %b = call i64 @llvm.read_register.i64(metadata !0) [[CONVERGENT]]
; CHECK-NEXT: ret i64 %b
;
  %b = call i64 @llvm.amdgcn.ballot.i64(i1 1)
  ret i64 %b
}

define i32 @ballot_nocombine_32(i1 %i) {
; CHECK-LABEL: @ballot_nocombine_32(
; CHECK-NEXT: %b = call i32 @llvm.amdgcn.ballot.i32(i1 %i)
; CHECK-NEXT: ret i32 %b
;
  %b = call i32 @llvm.amdgcn.ballot.i32(i1 %i)
  ret i32 %b
}

define i32 @ballot_zero_32() {
; CHECK-LABEL: @ballot_zero_32(
; CHECK-NEXT: ret i32 0
;
  %b = call i32 @llvm.amdgcn.ballot.i32(i1 0)
  ret i32 %b
}

define i32 @ballot_one_32() {
; CHECK-LABEL: @ballot_one_32(
; CHECK-NEXT: %b = call i32 @llvm.read_register.i32(metadata !1) [[CONVERGENT]]
; CHECK-NEXT: ret i32 %b
;
  %b = call i32 @llvm.amdgcn.ballot.i32(i1 1)
  ret i32 %b
}

; --------------------------------------------------------------------
; llvm.amdgcn.wqm.vote
; --------------------------------------------------------------------