[AMDGPU] Implement widening multiplies with v_mad_i64_i32/v_mad_u64_u32

Select SelectionDAG ops smul_lohi/umul_lohi to
v_mad_i64_i32/v_mad_u64_u32 respectively, with an addend of 0.
v_mul_lo, v_mul_hi and v_mad_i64/u64 are all quarter-rate instructions
so it is better to use one instruction than two.

Further improvements are possible to make better use of the addend
operand, but this is already a strict improvement over what we have
now.

Differential Revision: https://reviews.llvm.org/D113986
This commit is contained in:
Jay Foad 2021-11-12 18:02:58 +00:00
parent 8a52bd82e3
commit d7e03df719
17 changed files with 1126 additions and 1130 deletions

View File

@ -654,6 +654,9 @@ void AMDGPUDAGToDAGISel::Select(SDNode *N) {
SelectMAD_64_32(N);
return;
}
case ISD::SMUL_LOHI:
case ISD::UMUL_LOHI:
return SelectMUL_LOHI(N);
case ISD::CopyToReg: {
const SITargetLowering& Lowering =
*static_cast<const SITargetLowering*>(getTargetLowering());
@ -1013,6 +1016,32 @@ void AMDGPUDAGToDAGISel::SelectMAD_64_32(SDNode *N) {
CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
}
// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
void AMDGPUDAGToDAGISel::SelectMUL_LOHI(SDNode *N) {
  SDLoc SL(N);
  bool Signed = N->getOpcode() == ISD::SMUL_LOHI;
  unsigned Opc = Signed ? AMDGPU::V_MAD_I64_I32_e64 : AMDGPU::V_MAD_U64_U32_e64;
  // Select the widening multiply as a multiply-add with a zero addend and
  // clamping disabled.
  SDValue Zero = CurDAG->getTargetConstant(0, SL, MVT::i64);
  SDValue Clamp = CurDAG->getTargetConstant(0, SL, MVT::i1);
  SDValue Ops[] = {N->getOperand(0), N->getOperand(1), Zero, Clamp};
  // V_MAD_[IU]64_[IU]32 defines a 64-bit vdst plus an i1 carry-out, not the
  // (i32, i32) result list of the [SU]MUL_LOHI node, so the machine node must
  // be created with the instruction's own result types rather than
  // N->getVTList(); the two i32 halves are then peeled off with
  // EXTRACT_SUBREG below.
  SDNode *Mad = CurDAG->getMachineNode(Opc, SL, MVT::i64, MVT::i1, Ops);
  if (!SDValue(N, 0).use_empty()) {
    // Low 32 bits of the product: sub0 of the 64-bit result.
    SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32);
    SDNode *Lo = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, SL,
                                        MVT::i32, SDValue(Mad, 0), Sub0);
    ReplaceUses(SDValue(N, 0), SDValue(Lo, 0));
  }
  if (!SDValue(N, 1).use_empty()) {
    // High 32 bits of the product: sub1 of the 64-bit result.
    SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32);
    SDNode *Hi = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, SL,
                                        MVT::i32, SDValue(Mad, 0), Sub1);
    ReplaceUses(SDValue(N, 1), SDValue(Hi, 0));
  }
  // All uses of N have been rewired to the extracts; drop the original node.
  CurDAG->RemoveDeadNode(N);
}
bool AMDGPUDAGToDAGISel::isDSOffsetLegal(SDValue Base, unsigned Offset) const {
if (!isUInt<16>(Offset))
return false;

View File

@ -235,6 +235,7 @@ private:
void SelectUADDO_USUBO(SDNode *N);
void SelectDIV_SCALE(SDNode *N);
void SelectMAD_64_32(SDNode *N);
void SelectMUL_LOHI(SDNode *N);
void SelectFMA_W_CHAIN(SDNode *N);
void SelectFMUL_W_CHAIN(SDNode *N);
SDNode *getBFE32(bool IsSigned, const SDLoc &DL, SDValue Val, uint32_t Offset,

View File

@ -594,6 +594,8 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
setTargetDAGCombine(ISD::SRL);
setTargetDAGCombine(ISD::TRUNCATE);
setTargetDAGCombine(ISD::MUL);
setTargetDAGCombine(ISD::SMUL_LOHI);
setTargetDAGCombine(ISD::UMUL_LOHI);
setTargetDAGCombine(ISD::MULHU);
setTargetDAGCombine(ISD::MULHS);
setTargetDAGCombine(ISD::SELECT);
@ -3462,6 +3464,50 @@ SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
return DAG.getSExtOrTrunc(Mul, DL, VT);
}
SDValue
AMDGPUTargetLowering::performMulLoHiCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
// Only combine [SU]MUL_LOHI nodes whose per-half result type is i32; other
// widths are left to the generic combiner/legalizer.
if (N->getValueType(0) != MVT::i32)
return SDValue();
SelectionDAG &DAG = DCI.DAG;
SDLoc DL(N);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// SimplifyDemandedBits has the annoying habit of turning useful zero_extends
// in the source into any_extends if the result of the mul is truncated. Since
// we can assume the high bits are whatever we want, use the underlying value
// to avoid the unknown high bits from interfering.
if (N0.getOpcode() == ISD::ANY_EXTEND)
N0 = N0.getOperand(0);
if (N1.getOpcode() == ISD::ANY_EXTEND)
N1 = N1.getOperand(0);
// Try to use two fast 24-bit multiplies (one for each half of the result)
// instead of one slow extending multiply.
unsigned LoOpcode, HiOpcode;
if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
// Both operands fit in 24 bits unsigned: bring them back to i32 and use the
// unsigned 24-bit mul/mulhi pair.
N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
LoOpcode = AMDGPUISD::MUL_U24;
HiOpcode = AMDGPUISD::MULHI_U24;
} else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
// Signed 24-bit case: same idea with sign extension.
N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
LoOpcode = AMDGPUISD::MUL_I24;
HiOpcode = AMDGPUISD::MULHI_I24;
} else {
// Operands don't fit in 24 bits; keep the wide multiply.
return SDValue();
}
// Replace both results of the MUL_LOHI node with the 24-bit lo/hi pair and
// return the node itself to signal that the combine succeeded.
SDValue Lo = DAG.getNode(LoOpcode, DL, MVT::i32, N0, N1);
SDValue Hi = DAG.getNode(HiOpcode, DL, MVT::i32, N0, N1);
DCI.CombineTo(N, Lo, Hi);
return SDValue(N, 0);
}
SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
EVT VT = N->getValueType(0);
@ -4103,6 +4149,9 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
return performTruncateCombine(N, DCI);
case ISD::MUL:
return performMulCombine(N, DCI);
case ISD::SMUL_LOHI:
case ISD::UMUL_LOHI:
return performMulLoHiCombine(N, DCI);
case ISD::MULHS:
return performMulhsCombine(N, DCI);
case ISD::MULHU:

View File

@ -91,6 +91,7 @@ protected:
SDValue performSrlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performTruncateCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performMulLoHiCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performMulhsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performMulhuCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS,

View File

@ -809,6 +809,11 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SMULO, MVT::i64, Custom);
setOperationAction(ISD::UMULO, MVT::i64, Custom);
if (Subtarget->hasMad64_32()) {
setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
}
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
@ -4691,6 +4696,9 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SMULO:
case ISD::UMULO:
return lowerXMULO(Op, DAG);
case ISD::SMUL_LOHI:
case ISD::UMUL_LOHI:
return lowerXMUL_LOHI(Op, DAG);
case ISD::DYNAMIC_STACKALLOC:
return LowerDYNAMIC_STACKALLOC(Op, DAG);
}
@ -5304,6 +5312,21 @@ SDValue SITargetLowering::lowerXMULO(SDValue Op, SelectionDAG &DAG) const {
return DAG.getMergeValues({ Result, Overflow }, SL);
}
// Custom lowering for [SU]MUL_LOHI: decide between letting selection pick
// V_MAD_[IU]64_[IU]32 and expanding to scalar mul + mul_hi.
SDValue SITargetLowering::lowerXMUL_LOHI(SDValue Op, SelectionDAG &DAG) const {
  // A uniform multiply on a target with S_MUL_HI_[IU]32 is best expanded to
  // S_MUL_I32 + S_MUL_HI_[IU]32, which the generic expansion produces.
  if (!Op->isDivergent() && Subtarget->hasSMulHi())
    return SDValue();
  // Otherwise keep the node as-is: either the multiply is divergent and will
  // select directly to V_MAD_[IU]64_[IU]32, or it is uniform but the high
  // part would need V_MUL_HI_[IU]32 anyway, so the whole thing might as well
  // be done with V_MAD_[IU]64_[IU]32.
  return Op;
}
SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
if (!Subtarget->isTrapHandlerEnabled() ||
Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA)

View File

@ -135,6 +135,7 @@ private:
SDValue lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerFMINNUM_FMAXNUM(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerXMULO(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerXMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
SDValue getSegmentAperture(unsigned AS, const SDLoc &DL,
SelectionDAG &DAG) const;

View File

@ -818,32 +818,29 @@ define amdgpu_kernel void @add_i64_uniform(i64 addrspace(1)* %out, i64 addrspace
; GFX8-NEXT: s_mov_b32 s12, s6
; GFX8-NEXT: s_bcnt1_i32_b64 s6, s[8:9]
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_mul_hi_u32 v0, s0, v0
; GFX8-NEXT: s_mov_b32 s13, s7
; GFX8-NEXT: s_mul_i32 s7, s1, s6
; GFX8-NEXT: s_mul_i32 s6, s0, s6
; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[8:9], s0, v0, 0
; GFX8-NEXT: s_mul_i32 s6, s1, s6
; GFX8-NEXT: s_mov_b32 s15, 0xf000
; GFX8-NEXT: s_mov_b32 s14, -1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, s7, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: s_mov_b32 s13, s7
; GFX8-NEXT: v_add_u32_e32 v1, vcc, s6, v1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_atomic_add_x2 v[0:1], off, s[12:15], 0 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
; GFX8-NEXT: .LBB4_2:
; GFX8-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX8-NEXT: v_readfirstlane_b32 s2, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mul_lo_u32 v0, s1, v2
; GFX8-NEXT: v_mul_hi_u32 v3, s0, v2
; GFX8-NEXT: v_mul_lo_u32 v4, s1, v2
; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s0, v2, 0
; GFX8-NEXT: v_readfirstlane_b32 s0, v0
; GFX8-NEXT: v_readfirstlane_b32 s1, v1
; GFX8-NEXT: v_mul_lo_u32 v1, s0, v2
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v3, v4
; GFX8-NEXT: v_mov_b32_e32 v3, s1
; GFX8-NEXT: v_add_u32_e32 v0, vcc, s2, v1
; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v2
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v3, v2, vcc
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
@ -878,17 +875,16 @@ define amdgpu_kernel void @add_i64_uniform(i64 addrspace(1)* %out, i64 addrspace
; GFX9-NEXT: .LBB4_2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mul_lo_u32 v3, s3, v2
; GFX9-NEXT: v_mul_hi_u32 v4, s2, v2
; GFX9-NEXT: v_mul_lo_u32 v4, s3, v2
; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v2, 0
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
; GFX9-NEXT: v_mul_lo_u32 v0, s2, v2
; GFX9-NEXT: v_readfirstlane_b32 s1, v1
; GFX9-NEXT: v_add_u32_e32 v1, v4, v3
; GFX9-NEXT: v_mov_b32_e32 v2, s1
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_add_u32_e32 v1, v3, v4
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v2
; GFX9-NEXT: s_mov_b32 s7, 0xf000
; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v2, v1, vcc
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v3, v1, vcc
; GFX9-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX9-NEXT: s_endpgm
;
@ -927,14 +923,13 @@ define amdgpu_kernel void @add_i64_uniform(i64 addrspace(1)* %out, i64 addrspace
; GFX1064-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064-NEXT: v_mul_lo_u32 v3, s3, v2
; GFX1064-NEXT: v_mul_hi_u32 v4, s2, v2
; GFX1064-NEXT: v_mul_lo_u32 v2, s2, v2
; GFX1064-NEXT: v_mul_lo_u32 v4, s3, v2
; GFX1064-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v2, 0
; GFX1064-NEXT: v_readfirstlane_b32 s0, v0
; GFX1064-NEXT: v_readfirstlane_b32 s1, v1
; GFX1064-NEXT: s_mov_b32 s7, 0x31016000
; GFX1064-NEXT: s_mov_b32 s6, -1
; GFX1064-NEXT: v_add_nc_u32_e32 v1, v4, v3
; GFX1064-NEXT: v_add_nc_u32_e32 v1, v3, v4
; GFX1064-NEXT: v_add_co_u32 v0, vcc, s0, v2
; GFX1064-NEXT: v_add_co_ci_u32_e32 v1, vcc, s1, v1, vcc
; GFX1064-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
@ -974,14 +969,13 @@ define amdgpu_kernel void @add_i64_uniform(i64 addrspace(1)* %out, i64 addrspace
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032-NEXT: v_mul_lo_u32 v3, s3, v2
; GFX1032-NEXT: v_mul_hi_u32 v4, s2, v2
; GFX1032-NEXT: v_mul_lo_u32 v2, s2, v2
; GFX1032-NEXT: v_mul_lo_u32 v4, s3, v2
; GFX1032-NEXT: v_mad_u64_u32 v[2:3], s0, s2, v2, 0
; GFX1032-NEXT: v_readfirstlane_b32 s0, v0
; GFX1032-NEXT: v_readfirstlane_b32 s1, v1
; GFX1032-NEXT: s_mov_b32 s7, 0x31016000
; GFX1032-NEXT: s_mov_b32 s6, -1
; GFX1032-NEXT: v_add_nc_u32_e32 v1, v4, v3
; GFX1032-NEXT: v_add_nc_u32_e32 v1, v3, v4
; GFX1032-NEXT: v_add_co_u32 v0, vcc_lo, s0, v2
; GFX1032-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
; GFX1032-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
@ -1955,32 +1949,29 @@ define amdgpu_kernel void @sub_i64_uniform(i64 addrspace(1)* %out, i64 addrspace
; GFX8-NEXT: s_mov_b32 s12, s6
; GFX8-NEXT: s_bcnt1_i32_b64 s6, s[8:9]
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_mul_hi_u32 v0, s0, v0
; GFX8-NEXT: s_mov_b32 s13, s7
; GFX8-NEXT: s_mul_i32 s7, s1, s6
; GFX8-NEXT: s_mul_i32 s6, s0, s6
; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[8:9], s0, v0, 0
; GFX8-NEXT: s_mul_i32 s6, s1, s6
; GFX8-NEXT: s_mov_b32 s15, 0xf000
; GFX8-NEXT: s_mov_b32 s14, -1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, s7, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: s_mov_b32 s13, s7
; GFX8-NEXT: v_add_u32_e32 v1, vcc, s6, v1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_atomic_sub_x2 v[0:1], off, s[12:15], 0 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
; GFX8-NEXT: .LBB10_2:
; GFX8-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX8-NEXT: v_readfirstlane_b32 s2, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mul_lo_u32 v0, s1, v2
; GFX8-NEXT: v_mul_hi_u32 v3, s0, v2
; GFX8-NEXT: v_mul_lo_u32 v4, s1, v2
; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s0, v2, 0
; GFX8-NEXT: v_readfirstlane_b32 s0, v0
; GFX8-NEXT: v_readfirstlane_b32 s1, v1
; GFX8-NEXT: v_mul_lo_u32 v1, s0, v2
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v3, v4
; GFX8-NEXT: v_mov_b32_e32 v3, s1
; GFX8-NEXT: v_sub_u32_e32 v0, vcc, s2, v1
; GFX8-NEXT: v_sub_u32_e32 v0, vcc, s0, v2
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: v_subb_u32_e32 v1, vcc, v3, v2, vcc
; GFX8-NEXT: v_subb_u32_e32 v1, vcc, v3, v1, vcc
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
@ -2015,17 +2006,16 @@ define amdgpu_kernel void @sub_i64_uniform(i64 addrspace(1)* %out, i64 addrspace
; GFX9-NEXT: .LBB10_2:
; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mul_lo_u32 v3, s3, v2
; GFX9-NEXT: v_mul_hi_u32 v4, s2, v2
; GFX9-NEXT: v_mul_lo_u32 v4, s3, v2
; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v2, 0
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
; GFX9-NEXT: v_mul_lo_u32 v0, s2, v2
; GFX9-NEXT: v_readfirstlane_b32 s1, v1
; GFX9-NEXT: v_add_u32_e32 v1, v4, v3
; GFX9-NEXT: v_mov_b32_e32 v2, s1
; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_add_u32_e32 v1, v3, v4
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, s0, v2
; GFX9-NEXT: s_mov_b32 s7, 0xf000
; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v2, v1, vcc
; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
; GFX9-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX9-NEXT: s_endpgm
;
@ -2064,14 +2054,13 @@ define amdgpu_kernel void @sub_i64_uniform(i64 addrspace(1)* %out, i64 addrspace
; GFX1064-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064-NEXT: v_mul_lo_u32 v3, s3, v2
; GFX1064-NEXT: v_mul_hi_u32 v4, s2, v2
; GFX1064-NEXT: v_mul_lo_u32 v2, s2, v2
; GFX1064-NEXT: v_mul_lo_u32 v4, s3, v2
; GFX1064-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v2, 0
; GFX1064-NEXT: v_readfirstlane_b32 s0, v0
; GFX1064-NEXT: v_readfirstlane_b32 s1, v1
; GFX1064-NEXT: s_mov_b32 s7, 0x31016000
; GFX1064-NEXT: s_mov_b32 s6, -1
; GFX1064-NEXT: v_add_nc_u32_e32 v1, v4, v3
; GFX1064-NEXT: v_add_nc_u32_e32 v1, v3, v4
; GFX1064-NEXT: v_sub_co_u32 v0, vcc, s0, v2
; GFX1064-NEXT: v_sub_co_ci_u32_e32 v1, vcc, s1, v1, vcc
; GFX1064-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
@ -2111,14 +2100,13 @@ define amdgpu_kernel void @sub_i64_uniform(i64 addrspace(1)* %out, i64 addrspace
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032-NEXT: v_mul_lo_u32 v3, s3, v2
; GFX1032-NEXT: v_mul_hi_u32 v4, s2, v2
; GFX1032-NEXT: v_mul_lo_u32 v2, s2, v2
; GFX1032-NEXT: v_mul_lo_u32 v4, s3, v2
; GFX1032-NEXT: v_mad_u64_u32 v[2:3], s0, s2, v2, 0
; GFX1032-NEXT: v_readfirstlane_b32 s0, v0
; GFX1032-NEXT: v_readfirstlane_b32 s1, v1
; GFX1032-NEXT: s_mov_b32 s7, 0x31016000
; GFX1032-NEXT: s_mov_b32 s6, -1
; GFX1032-NEXT: v_add_nc_u32_e32 v1, v4, v3
; GFX1032-NEXT: v_add_nc_u32_e32 v1, v3, v4
; GFX1032-NEXT: v_sub_co_u32 v0, vcc_lo, s0, v2
; GFX1032-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
; GFX1032-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0

View File

@ -954,15 +954,13 @@ define amdgpu_kernel void @add_i64_uniform(i64 addrspace(1)* %out, i64 %additive
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8-NEXT: s_cbranch_execz .LBB5_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: s_bcnt1_i32_b64 s8, s[6:7]
; GFX8-NEXT: v_mov_b32_e32 v0, s8
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mul_hi_u32 v0, s2, v0
; GFX8-NEXT: s_mul_i32 s7, s3, s6
; GFX8-NEXT: s_mul_i32 s6, s2, s6
; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[6:7], s2, v0, 0
; GFX8-NEXT: s_mul_i32 s6, s3, s8
; GFX8-NEXT: v_mov_b32_e32 v3, 0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, s7, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_add_u32_e32 v1, vcc, s6, v1
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: ds_add_rtn_u64 v[0:1], v3, v[0:1]
@ -971,18 +969,17 @@ define amdgpu_kernel void @add_i64_uniform(i64 addrspace(1)* %out, i64 %additive
; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: v_readfirstlane_b32 s0, v0
; GFX8-NEXT: v_mul_lo_u32 v0, s3, v2
; GFX8-NEXT: v_mul_hi_u32 v3, s2, v2
; GFX8-NEXT: s_mov_b32 s5, s1
; GFX8-NEXT: v_mul_lo_u32 v4, s3, v2
; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v2, 0
; GFX8-NEXT: v_readfirstlane_b32 s0, v0
; GFX8-NEXT: v_readfirstlane_b32 s1, v1
; GFX8-NEXT: v_mul_lo_u32 v1, s2, v2
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v3, v4
; GFX8-NEXT: v_mov_b32_e32 v3, s1
; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v1
; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v2
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v3, v2, vcc
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
@ -1012,19 +1009,18 @@ define amdgpu_kernel void @add_i64_uniform(i64 addrspace(1)* %out, i64 %additive
; GFX9-NEXT: .LBB5_2:
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mul_lo_u32 v4, s3, v2
; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[2:3], s2, v2, 0
; GFX9-NEXT: s_mov_b32 s4, s0
; GFX9-NEXT: v_mul_lo_u32 v3, s3, v2
; GFX9-NEXT: v_mul_hi_u32 v4, s2, v2
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
; GFX9-NEXT: v_mul_lo_u32 v0, s2, v2
; GFX9-NEXT: s_mov_b32 s5, s1
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
; GFX9-NEXT: v_readfirstlane_b32 s1, v1
; GFX9-NEXT: v_add_u32_e32 v1, v4, v3
; GFX9-NEXT: v_mov_b32_e32 v2, s1
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_add_u32_e32 v1, v3, v4
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v2
; GFX9-NEXT: s_mov_b32 s7, 0xf000
; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v2, v1, vcc
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v3, v1, vcc
; GFX9-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX9-NEXT: s_endpgm
;
@ -1057,13 +1053,12 @@ define amdgpu_kernel void @add_i64_uniform(i64 addrspace(1)* %out, i64 %additive
; GFX1064-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064-NEXT: v_mul_lo_u32 v3, s3, v2
; GFX1064-NEXT: v_mul_hi_u32 v4, s2, v2
; GFX1064-NEXT: v_mul_lo_u32 v2, s2, v2
; GFX1064-NEXT: v_mul_lo_u32 v4, s3, v2
; GFX1064-NEXT: v_mad_u64_u32 v[2:3], s[2:3], s2, v2, 0
; GFX1064-NEXT: v_readfirstlane_b32 s2, v0
; GFX1064-NEXT: v_readfirstlane_b32 s4, v1
; GFX1064-NEXT: s_mov_b32 s3, 0x31016000
; GFX1064-NEXT: v_add_nc_u32_e32 v1, v4, v3
; GFX1064-NEXT: v_add_nc_u32_e32 v1, v3, v4
; GFX1064-NEXT: v_add_co_u32 v0, vcc, s2, v2
; GFX1064-NEXT: s_mov_b32 s2, -1
; GFX1064-NEXT: v_add_co_ci_u32_e32 v1, vcc, s4, v1, vcc
@ -1098,13 +1093,12 @@ define amdgpu_kernel void @add_i64_uniform(i64 addrspace(1)* %out, i64 %additive
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032-NEXT: v_mul_lo_u32 v3, s3, v2
; GFX1032-NEXT: v_mul_hi_u32 v4, s2, v2
; GFX1032-NEXT: v_mul_lo_u32 v2, s2, v2
; GFX1032-NEXT: v_mul_lo_u32 v4, s3, v2
; GFX1032-NEXT: v_mad_u64_u32 v[2:3], s2, s2, v2, 0
; GFX1032-NEXT: v_readfirstlane_b32 s2, v0
; GFX1032-NEXT: v_readfirstlane_b32 s4, v1
; GFX1032-NEXT: s_mov_b32 s3, 0x31016000
; GFX1032-NEXT: v_add_nc_u32_e32 v1, v4, v3
; GFX1032-NEXT: v_add_nc_u32_e32 v1, v3, v4
; GFX1032-NEXT: v_add_co_u32 v0, vcc_lo, s2, v2
; GFX1032-NEXT: s_mov_b32 s2, -1
; GFX1032-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, s4, v1, vcc_lo
@ -2133,15 +2127,13 @@ define amdgpu_kernel void @sub_i64_uniform(i64 addrspace(1)* %out, i64 %subitive
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX8-NEXT: s_cbranch_execz .LBB12_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: s_bcnt1_i32_b64 s8, s[6:7]
; GFX8-NEXT: v_mov_b32_e32 v0, s8
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mul_hi_u32 v0, s2, v0
; GFX8-NEXT: s_mul_i32 s7, s3, s6
; GFX8-NEXT: s_mul_i32 s6, s2, s6
; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[6:7], s2, v0, 0
; GFX8-NEXT: s_mul_i32 s6, s3, s8
; GFX8-NEXT: v_mov_b32_e32 v3, 0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, s7, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_add_u32_e32 v1, vcc, s6, v1
; GFX8-NEXT: s_mov_b32 m0, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: ds_sub_rtn_u64 v[0:1], v3, v[0:1]
@ -2150,18 +2142,17 @@ define amdgpu_kernel void @sub_i64_uniform(i64 addrspace(1)* %out, i64 %subitive
; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mov_b32 s4, s0
; GFX8-NEXT: v_readfirstlane_b32 s0, v0
; GFX8-NEXT: v_mul_lo_u32 v0, s3, v2
; GFX8-NEXT: v_mul_hi_u32 v3, s2, v2
; GFX8-NEXT: s_mov_b32 s5, s1
; GFX8-NEXT: v_mul_lo_u32 v4, s3, v2
; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v2, 0
; GFX8-NEXT: v_readfirstlane_b32 s0, v0
; GFX8-NEXT: v_readfirstlane_b32 s1, v1
; GFX8-NEXT: v_mul_lo_u32 v1, s2, v2
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v3, v4
; GFX8-NEXT: v_mov_b32_e32 v3, s1
; GFX8-NEXT: v_sub_u32_e32 v0, vcc, s0, v1
; GFX8-NEXT: v_sub_u32_e32 v0, vcc, s0, v2
; GFX8-NEXT: s_mov_b32 s7, 0xf000
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: v_subb_u32_e32 v1, vcc, v3, v2, vcc
; GFX8-NEXT: v_subb_u32_e32 v1, vcc, v3, v1, vcc
; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX8-NEXT: s_endpgm
;
@ -2191,19 +2182,18 @@ define amdgpu_kernel void @sub_i64_uniform(i64 addrspace(1)* %out, i64 %subitive
; GFX9-NEXT: .LBB12_2:
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mul_lo_u32 v4, s3, v2
; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[2:3], s2, v2, 0
; GFX9-NEXT: s_mov_b32 s4, s0
; GFX9-NEXT: v_mul_lo_u32 v3, s3, v2
; GFX9-NEXT: v_mul_hi_u32 v4, s2, v2
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
; GFX9-NEXT: v_mul_lo_u32 v0, s2, v2
; GFX9-NEXT: s_mov_b32 s5, s1
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
; GFX9-NEXT: v_readfirstlane_b32 s1, v1
; GFX9-NEXT: v_add_u32_e32 v1, v4, v3
; GFX9-NEXT: v_mov_b32_e32 v2, s1
; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_add_u32_e32 v1, v3, v4
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, s0, v2
; GFX9-NEXT: s_mov_b32 s7, 0xf000
; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v2, v1, vcc
; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
; GFX9-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GFX9-NEXT: s_endpgm
;
@ -2236,13 +2226,12 @@ define amdgpu_kernel void @sub_i64_uniform(i64 addrspace(1)* %out, i64 %subitive
; GFX1064-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064-NEXT: v_mul_lo_u32 v3, s3, v2
; GFX1064-NEXT: v_mul_hi_u32 v4, s2, v2
; GFX1064-NEXT: v_mul_lo_u32 v2, s2, v2
; GFX1064-NEXT: v_mul_lo_u32 v4, s3, v2
; GFX1064-NEXT: v_mad_u64_u32 v[2:3], s[2:3], s2, v2, 0
; GFX1064-NEXT: v_readfirstlane_b32 s2, v0
; GFX1064-NEXT: v_readfirstlane_b32 s4, v1
; GFX1064-NEXT: s_mov_b32 s3, 0x31016000
; GFX1064-NEXT: v_add_nc_u32_e32 v1, v4, v3
; GFX1064-NEXT: v_add_nc_u32_e32 v1, v3, v4
; GFX1064-NEXT: v_sub_co_u32 v0, vcc, s2, v2
; GFX1064-NEXT: s_mov_b32 s2, -1
; GFX1064-NEXT: v_sub_co_ci_u32_e32 v1, vcc, s4, v1, vcc
@ -2277,13 +2266,12 @@ define amdgpu_kernel void @sub_i64_uniform(i64 addrspace(1)* %out, i64 %subitive
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032-NEXT: v_mul_lo_u32 v3, s3, v2
; GFX1032-NEXT: v_mul_hi_u32 v4, s2, v2
; GFX1032-NEXT: v_mul_lo_u32 v2, s2, v2
; GFX1032-NEXT: v_mul_lo_u32 v4, s3, v2
; GFX1032-NEXT: v_mad_u64_u32 v[2:3], s2, s2, v2, 0
; GFX1032-NEXT: v_readfirstlane_b32 s2, v0
; GFX1032-NEXT: v_readfirstlane_b32 s4, v1
; GFX1032-NEXT: s_mov_b32 s3, 0x31016000
; GFX1032-NEXT: v_add_nc_u32_e32 v1, v4, v3
; GFX1032-NEXT: v_add_nc_u32_e32 v1, v3, v4
; GFX1032-NEXT: v_sub_co_u32 v0, vcc_lo, s2, v2
; GFX1032-NEXT: s_mov_b32 s2, -1
; GFX1032-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, s4, v1, vcc_lo

File diff suppressed because it is too large Load Diff

View File

@ -30,23 +30,24 @@ define { i64, i1 } @umulo_i64_v_v(i64 %x, i64 %y) {
; GFX9-LABEL: umulo_i64_v_v:
; GFX9: ; %bb.0: ; %bb
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mul_lo_u32 v5, v0, v3
; GFX9-NEXT: v_mul_hi_u32 v6, v0, v2
; GFX9-NEXT: v_mul_hi_u32 v8, v0, v3
; GFX9-NEXT: v_mul_lo_u32 v7, v1, v2
; GFX9-NEXT: v_mul_hi_u32 v4, v1, v2
; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, v6, v5
; GFX9-NEXT: v_mul_hi_u32 v10, v1, v3
; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v8, vcc
; GFX9-NEXT: v_mul_lo_u32 v1, v1, v3
; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, v9, v7
; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v8, v4, vcc
; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v10, vcc
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v4, v1
; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v8, vcc
; GFX9-NEXT: v_mul_lo_u32 v0, v0, v2
; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[3:4]
; GFX9-NEXT: v_add3_u32 v1, v6, v5, v7
; GFX9-NEXT: v_mov_b32_e32 v5, v0
; GFX9-NEXT: v_mov_b32_e32 v4, v1
; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v5, v2, 0
; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v5, v3, 0
; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v4, v2, 0
; GFX9-NEXT: v_mov_b32_e32 v10, v1
; GFX9-NEXT: v_add_co_u32_e32 v10, vcc, v10, v6
; GFX9-NEXT: v_addc_co_u32_e32 v11, vcc, 0, v7, vcc
; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v4, v3, 0
; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v10, v8
; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, v11, v9, vcc
; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v7, vcc
; GFX9-NEXT: v_mul_lo_u32 v4, v4, v2
; GFX9-NEXT: v_mul_lo_u32 v5, v5, v3
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v8, v6
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v7, vcc
; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
; GFX9-NEXT: v_add3_u32 v1, v1, v5, v4
; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@ -54,23 +55,24 @@ define { i64, i1 } @umulo_i64_v_v(i64 %x, i64 %y) {
; GFX10: ; %bb.0: ; %bb
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_mul_lo_u32 v5, v0, v3
; GFX10-NEXT: v_mul_hi_u32 v6, v0, v2
; GFX10-NEXT: v_mul_hi_u32 v4, v0, v3
; GFX10-NEXT: v_mul_lo_u32 v8, v1, v2
; GFX10-NEXT: v_mul_hi_u32 v7, v1, v2
; GFX10-NEXT: v_mul_hi_u32 v9, v1, v3
; GFX10-NEXT: v_mul_lo_u32 v1, v1, v3
; GFX10-NEXT: v_mul_lo_u32 v0, v0, v2
; GFX10-NEXT: v_add_co_u32 v10, vcc_lo, v6, v5
; GFX10-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
; GFX10-NEXT: v_add_co_u32 v3, vcc_lo, v10, v8
; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v4, v7, vcc_lo
; GFX10-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, 0, v9, vcc_lo
; GFX10-NEXT: v_add_co_u32 v3, vcc_lo, v3, v1
; GFX10-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
; GFX10-NEXT: v_add3_u32 v1, v6, v5, v8
; GFX10-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[3:4]
; GFX10-NEXT: v_mov_b32_e32 v4, v0
; GFX10-NEXT: v_mov_b32_e32 v5, v1
; GFX10-NEXT: v_mad_u64_u32 v[0:1], s4, v4, v2, 0
; GFX10-NEXT: v_mad_u64_u32 v[6:7], s4, v4, v3, 0
; GFX10-NEXT: v_mad_u64_u32 v[9:10], s4, v5, v2, 0
; GFX10-NEXT: v_mad_u64_u32 v[11:12], s4, v5, v3, 0
; GFX10-NEXT: v_mov_b32_e32 v8, v1
; GFX10-NEXT: v_mul_lo_u32 v5, v5, v2
; GFX10-NEXT: v_mul_lo_u32 v4, v4, v3
; GFX10-NEXT: v_add_co_u32 v6, vcc_lo, v8, v6
; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, 0, v7, vcc_lo
; GFX10-NEXT: v_add3_u32 v1, v1, v4, v5
; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v6, v9
; GFX10-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, v7, v10, vcc_lo
; GFX10-NEXT: v_add_co_ci_u32_e32 v6, vcc_lo, 0, v12, vcc_lo
; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v2, v11
; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v6, vcc_lo
; GFX10-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
; GFX10-NEXT: s_setpc_b64 s[30:31]
bb:
@ -119,35 +121,36 @@ define { i64, i1 } @smulo_i64_v_v(i64 %x, i64 %y) {
; GFX9-LABEL: smulo_i64_v_v:
; GFX9: ; %bb.0: ; %bb
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mul_lo_u32 v5, v0, v3
; GFX9-NEXT: v_mul_hi_u32 v6, v0, v2
; GFX9-NEXT: v_mul_hi_u32 v8, v0, v3
; GFX9-NEXT: v_mul_lo_u32 v7, v1, v2
; GFX9-NEXT: v_mul_hi_u32 v4, v1, v2
; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, v6, v5
; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v8, vcc
; GFX9-NEXT: v_mul_hi_i32 v10, v1, v3
; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, v9, v7
; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v8, v4, vcc
; GFX9-NEXT: v_mul_lo_u32 v8, v1, v3
; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, 0, v10, vcc
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8
; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v9, vcc
; GFX9-NEXT: v_sub_co_u32_e32 v9, vcc, v4, v2
; GFX9-NEXT: v_subbrev_co_u32_e32 v10, vcc, 0, v8, vcc
; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 0, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v8, v10, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v8, v4, v9, vcc
; GFX9-NEXT: v_sub_co_u32_e32 v9, vcc, v8, v0
; GFX9-NEXT: v_subbrev_co_u32_e32 v4, vcc, 0, v1, vcc
; GFX9-NEXT: v_mov_b32_e32 v5, v0
; GFX9-NEXT: v_mov_b32_e32 v4, v1
; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v5, v2, 0
; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v5, v3, 0
; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v4, v2, 0
; GFX9-NEXT: v_mov_b32_e32 v10, v1
; GFX9-NEXT: v_add_co_u32_e32 v10, vcc, v10, v6
; GFX9-NEXT: v_addc_co_u32_e32 v11, vcc, 0, v7, vcc
; GFX9-NEXT: v_mad_i64_i32 v[6:7], s[4:5], v4, v3, 0
; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v10, v8
; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, v11, v9, vcc
; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v7, vcc
; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v8, v6
; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v7, vcc
; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, v6, v2
; GFX9-NEXT: v_subbrev_co_u32_e32 v9, vcc, 0, v7, vcc
; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 0, v4
; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc
; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, v6, v5
; GFX9-NEXT: v_mul_lo_u32 v4, v4, v2
; GFX9-NEXT: v_mul_lo_u32 v5, v5, v3
; GFX9-NEXT: v_subbrev_co_u32_e32 v9, vcc, 0, v7, vcc
; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 0, v3
; GFX9-NEXT: v_cndmask_b32_e32 v4, v1, v4, vcc
; GFX9-NEXT: v_add3_u32 v1, v6, v5, v7
; GFX9-NEXT: v_ashrrev_i32_e32 v5, 31, v1
; GFX9-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc
; GFX9-NEXT: v_mul_lo_u32 v0, v0, v2
; GFX9-NEXT: v_mov_b32_e32 v6, v5
; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, v[3:4], v[5:6]
; GFX9-NEXT: v_add3_u32 v1, v1, v5, v4
; GFX9-NEXT: v_ashrrev_i32_e32 v4, 31, v1
; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v8, vcc
; GFX9-NEXT: v_mov_b32_e32 v5, v4
; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, v[2:3], v[4:5]
; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@ -155,35 +158,36 @@ define { i64, i1 } @smulo_i64_v_v(i64 %x, i64 %y) {
; GFX10: ; %bb.0: ; %bb
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_mul_lo_u32 v4, v0, v3
; GFX10-NEXT: v_mul_hi_u32 v5, v0, v2
; GFX10-NEXT: v_mul_hi_u32 v6, v0, v3
; GFX10-NEXT: v_mul_lo_u32 v8, v1, v2
; GFX10-NEXT: v_mul_hi_u32 v7, v1, v2
; GFX10-NEXT: v_mul_hi_i32 v9, v1, v3
; GFX10-NEXT: v_mul_lo_u32 v11, v1, v3
; GFX10-NEXT: v_add_co_u32 v10, vcc_lo, v5, v4
; GFX10-NEXT: v_add_co_ci_u32_e32 v6, vcc_lo, 0, v6, vcc_lo
; GFX10-NEXT: v_add_co_u32 v10, vcc_lo, v10, v8
; GFX10-NEXT: v_add_co_ci_u32_e32 v6, vcc_lo, v6, v7, vcc_lo
; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, 0, v9, vcc_lo
; GFX10-NEXT: v_mov_b32_e32 v4, v0
; GFX10-NEXT: v_mov_b32_e32 v5, v1
; GFX10-NEXT: v_mad_u64_u32 v[0:1], s4, v4, v2, 0
; GFX10-NEXT: v_mad_u64_u32 v[6:7], s4, v4, v3, 0
; GFX10-NEXT: v_mad_u64_u32 v[9:10], s4, v5, v2, 0
; GFX10-NEXT: v_mad_i64_i32 v[11:12], s4, v5, v3, 0
; GFX10-NEXT: v_mov_b32_e32 v8, v1
; GFX10-NEXT: v_add_co_u32 v6, vcc_lo, v8, v6
; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, 0, v7, vcc_lo
; GFX10-NEXT: v_mul_lo_u32 v8, v5, v2
; GFX10-NEXT: v_add_co_u32 v6, vcc_lo, v6, v9
; GFX10-NEXT: v_add_co_ci_u32_e32 v6, vcc_lo, v7, v10, vcc_lo
; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, 0, v12, vcc_lo
; GFX10-NEXT: v_mul_lo_u32 v9, v4, v3
; GFX10-NEXT: v_add_co_u32 v6, vcc_lo, v6, v11
; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, 0, v7, vcc_lo
; GFX10-NEXT: v_sub_co_u32 v9, vcc_lo, v6, v2
; GFX10-NEXT: v_sub_co_u32 v2, vcc_lo, v6, v2
; GFX10-NEXT: v_subrev_co_ci_u32_e32 v10, vcc_lo, 0, v7, vcc_lo
; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 0, v1
; GFX10-NEXT: v_add3_u32 v1, v5, v4, v8
; GFX10-NEXT: v_cndmask_b32_e32 v6, v6, v9, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v7, v7, v10, vcc_lo
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v1
; GFX10-NEXT: v_sub_co_u32 v8, vcc_lo, v6, v0
; GFX10-NEXT: v_subrev_co_ci_u32_e32 v9, vcc_lo, 0, v7, vcc_lo
; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 0, v5
; GFX10-NEXT: v_add3_u32 v1, v1, v9, v8
; GFX10-NEXT: v_cndmask_b32_e32 v6, v6, v2, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v10, vcc_lo
; GFX10-NEXT: v_ashrrev_i32_e32 v2, 31, v1
; GFX10-NEXT: v_sub_co_u32 v4, vcc_lo, v6, v4
; GFX10-NEXT: v_subrev_co_ci_u32_e32 v7, vcc_lo, 0, v5, vcc_lo
; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, 0, v3
; GFX10-NEXT: v_mov_b32_e32 v5, v4
; GFX10-NEXT: v_mul_lo_u32 v0, v0, v2
; GFX10-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v6, v6, v8, vcc_lo
; GFX10-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[6:7], v[4:5]
; GFX10-NEXT: v_mov_b32_e32 v3, v2
; GFX10-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc_lo
; GFX10-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[4:5], v[2:3]
; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
; GFX10-NEXT: s_setpc_b64 s[30:31]
bb:

View File

@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,CI %s
; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s
; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefixes=CI %s
; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefixes=SI %s
define i64 @mad_i64_i32_sextops(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
; CI-LABEL: mad_i64_i32_sextops:
@ -94,28 +94,26 @@ define i128 @mad_i64_i32_sextops_i32_i128(i32 %arg0, i32 %arg1, i128 %arg2) #0 {
; CI-LABEL: mad_i64_i32_sextops_i32_i128:
; CI: ; %bb.0:
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CI-NEXT: v_mul_hi_u32 v6, v0, v1
; CI-NEXT: v_ashrrev_i32_e32 v12, 31, v0
; CI-NEXT: v_mov_b32_e32 v7, 0
; CI-NEXT: v_ashrrev_i32_e32 v13, 31, v1
; CI-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v12, v1, v[6:7]
; CI-NEXT: v_mul_hi_i32 v11, v1, v12
; CI-NEXT: v_mul_lo_u32 v10, v1, v12
; CI-NEXT: v_mov_b32_e32 v6, v9
; CI-NEXT: v_mov_b32_e32 v9, v7
; CI-NEXT: v_mad_u64_u32 v[7:8], s[4:5], v0, v13, v[8:9]
; CI-NEXT: v_mad_i64_i32 v[10:11], s[4:5], v13, v0, v[10:11]
; CI-NEXT: v_add_i32_e32 v8, vcc, v6, v8
; CI-NEXT: v_addc_u32_e64 v9, s[4:5], 0, 0, vcc
; CI-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v12, v13, v[8:9]
; CI-NEXT: v_mul_lo_u32 v0, v0, v1
; CI-NEXT: v_mov_b32_e32 v1, v7
; CI-NEXT: v_add_i32_e32 v6, vcc, v8, v10
; CI-NEXT: v_addc_u32_e32 v8, vcc, v9, v11, vcc
; CI-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; CI-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v0, v1, 0
; CI-NEXT: v_ashrrev_i32_e32 v13, 31, v0
; CI-NEXT: v_mov_b32_e32 v8, 0
; CI-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v13, v1, v[7:8]
; CI-NEXT: v_ashrrev_i32_e32 v14, 31, v1
; CI-NEXT: v_mad_i64_i32 v[11:12], s[4:5], v1, v13, 0
; CI-NEXT: v_mov_b32_e32 v7, v10
; CI-NEXT: v_mov_b32_e32 v10, v8
; CI-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v0, v14, v[9:10]
; CI-NEXT: v_mad_i64_i32 v[0:1], s[4:5], v14, v0, v[11:12]
; CI-NEXT: v_add_i32_e32 v9, vcc, v7, v9
; CI-NEXT: v_addc_u32_e64 v10, s[4:5], 0, 0, vcc
; CI-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v13, v14, v[9:10]
; CI-NEXT: v_add_i32_e32 v7, vcc, v9, v0
; CI-NEXT: v_addc_u32_e32 v9, vcc, v10, v1, vcc
; CI-NEXT: v_mov_b32_e32 v1, v8
; CI-NEXT: v_add_i32_e32 v0, vcc, v6, v2
; CI-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
; CI-NEXT: v_addc_u32_e32 v2, vcc, v6, v4, vcc
; CI-NEXT: v_addc_u32_e32 v3, vcc, v8, v5, vcc
; CI-NEXT: v_addc_u32_e32 v2, vcc, v7, v4, vcc
; CI-NEXT: v_addc_u32_e32 v3, vcc, v9, v5, vcc
; CI-NEXT: s_setpc_b64 s[30:31]
;
; SI-LABEL: mad_i64_i32_sextops_i32_i128:
@ -234,17 +232,28 @@ define i64 @mad_u64_u32_bitops(i64 %arg0, i64 %arg1, i64 %arg2) #0 {
}
define i64 @mad_u64_u32_bitops_lhs_mask_small(i64 %arg0, i64 %arg1, i64 %arg2) #0 {
; GCN-LABEL: mad_u64_u32_bitops_lhs_mask_small:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_and_b32_e32 v1, 1, v1
; GCN-NEXT: v_mul_hi_u32 v3, v0, v2
; GCN-NEXT: v_mul_lo_u32 v1, v1, v2
; GCN-NEXT: v_mul_lo_u32 v0, v0, v2
; GCN-NEXT: v_add_i32_e32 v1, vcc, v3, v1
; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v4
; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
; CI-LABEL: mad_u64_u32_bitops_lhs_mask_small:
; CI: ; %bb.0:
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CI-NEXT: v_and_b32_e32 v1, 1, v1
; CI-NEXT: v_mul_lo_u32 v3, v1, v2
; CI-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v0, v2, 0
; CI-NEXT: v_add_i32_e32 v1, vcc, v1, v3
; CI-NEXT: v_add_i32_e32 v0, vcc, v0, v4
; CI-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
; CI-NEXT: s_setpc_b64 s[30:31]
;
; SI-LABEL: mad_u64_u32_bitops_lhs_mask_small:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 1, v1
; SI-NEXT: v_mul_hi_u32 v3, v0, v2
; SI-NEXT: v_mul_lo_u32 v1, v1, v2
; SI-NEXT: v_mul_lo_u32 v0, v0, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, v3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v4
; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
; SI-NEXT: s_setpc_b64 s[30:31]
%trunc.lhs = and i64 %arg0, 8589934591
%trunc.rhs = and i64 %arg1, 4294967295
%mul = mul i64 %trunc.lhs, %trunc.rhs
@ -253,17 +262,28 @@ define i64 @mad_u64_u32_bitops_lhs_mask_small(i64 %arg0, i64 %arg1, i64 %arg2) #
}
define i64 @mad_u64_u32_bitops_rhs_mask_small(i64 %arg0, i64 %arg1, i64 %arg2) #0 {
; GCN-LABEL: mad_u64_u32_bitops_rhs_mask_small:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_and_b32_e32 v1, 1, v3
; GCN-NEXT: v_mul_hi_u32 v3, v0, v2
; GCN-NEXT: v_mul_lo_u32 v1, v0, v1
; GCN-NEXT: v_mul_lo_u32 v0, v0, v2
; GCN-NEXT: v_add_i32_e32 v1, vcc, v3, v1
; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v4
; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
; CI-LABEL: mad_u64_u32_bitops_rhs_mask_small:
; CI: ; %bb.0:
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CI-NEXT: v_and_b32_e32 v1, 1, v3
; CI-NEXT: v_mul_lo_u32 v3, v0, v1
; CI-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v0, v2, 0
; CI-NEXT: v_add_i32_e32 v1, vcc, v1, v3
; CI-NEXT: v_add_i32_e32 v0, vcc, v0, v4
; CI-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
; CI-NEXT: s_setpc_b64 s[30:31]
;
; SI-LABEL: mad_u64_u32_bitops_rhs_mask_small:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 1, v3
; SI-NEXT: v_mul_hi_u32 v3, v0, v2
; SI-NEXT: v_mul_lo_u32 v1, v0, v1
; SI-NEXT: v_mul_lo_u32 v0, v0, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, v3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v4
; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
; SI-NEXT: s_setpc_b64 s[30:31]
%trunc.lhs = and i64 %arg0, 4294967295
%trunc.rhs = and i64 %arg1, 8589934591
%mul = mul i64 %trunc.lhs, %trunc.rhs

View File

@ -73,8 +73,9 @@ define amdgpu_kernel void @v_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 ad
; FUNC-LABEL: {{^}}mul64_sext_c:
; EG-DAG: MULLO_INT
; EG-DAG: MULHI_INT
; GCN-DAG: s_mul_i32
; GCN-DAG: v_mul_hi_i32
; SI-DAG: s_mul_i32
; SI-DAG: v_mul_hi_i32
; VI: v_mad_i64_i32
define amdgpu_kernel void @mul64_sext_c(i64 addrspace(1)* %out, i32 %in) {
entry:
%0 = sext i32 %in to i64
@ -86,8 +87,9 @@ entry:
; FUNC-LABEL: {{^}}v_mul64_sext_c:
; EG-DAG: MULLO_INT
; EG-DAG: MULHI_INT
; GCN-DAG: v_mul_lo_u32
; GCN-DAG: v_mul_hi_i32
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_hi_i32
; VI: v_mad_i64_i32
; GCN: s_endpgm
define amdgpu_kernel void @v_mul64_sext_c(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
%val = load i32, i32 addrspace(1)* %in, align 4
@ -98,8 +100,9 @@ define amdgpu_kernel void @v_mul64_sext_c(i64 addrspace(1)* %out, i32 addrspace(
}
; FUNC-LABEL: {{^}}v_mul64_sext_inline_imm:
; GCN-DAG: v_mul_lo_u32 v{{[0-9]+}}, v{{[0-9]+}}, 9
; GCN-DAG: v_mul_hi_i32 v{{[0-9]+}}, v{{[0-9]+}}, 9
; SI-DAG: v_mul_lo_u32 v{{[0-9]+}}, v{{[0-9]+}}, 9
; SI-DAG: v_mul_hi_i32 v{{[0-9]+}}, v{{[0-9]+}}, 9
; VI: v_mad_i64_i32 v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}, 9, 0
; GCN: s_endpgm
define amdgpu_kernel void @v_mul64_sext_inline_imm(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
%val = load i32, i32 addrspace(1)* %in, align 4
@ -184,8 +187,9 @@ endif:
}
; FUNC-LABEL: {{^}}mul64_in_branch:
; GCN-DAG: s_mul_i32
; GCN-DAG: v_mul_hi_u32
; SI-DAG: s_mul_i32
; SI-DAG: v_mul_hi_u32
; VI: v_mad_u64_u32
; GCN: s_endpgm
define amdgpu_kernel void @mul64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
entry:
@ -225,19 +229,16 @@ endif:
; SI-DAG: s_mul_i32
; SI-DAG: v_mul_hi_u32
; VI: v_mul_hi_u32
; VI: s_mul_i32
; VI: s_mul_i32
; VI: v_mul_hi_u32
; VI: v_mul_hi_u32
; VI: s_mul_i32
; VI: v_mad_u64_u32
; VI: s_mul_i32
; VI: v_mad_u64_u32
; VI: s_mul_i32
; VI: s_mul_i32
; VI: v_mad_u64_u32
; VI: s_mul_i32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: s_mul_i32
; VI-DAG: s_mul_i32
; VI-DAG: s_mul_i32
; VI-DAG: s_mul_i32
; GCN: buffer_store_dwordx4
@ -270,11 +271,15 @@ define amdgpu_kernel void @s_mul_i128(i128 addrspace(1)* %out, [8 x i32], i128 %
; SI-DAG: v_mul_lo_u32
; SI-DAG: v_mul_lo_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mad_u64_u32
; VI-DAG: v_mul_lo_u32
; VI-DAG: v_mul_lo_u32
; VI-DAG: v_mul_lo_u32
; VI-DAG: v_mul_hi_u32
; VI: v_mad_u64_u32
; VI: v_mad_u64_u32
; VI: v_mad_u64_u32
; GCN: {{buffer|flat}}_store_dwordx4
define amdgpu_kernel void @v_mul_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %aptr, i128 addrspace(1)* %bptr) #0 {

View File

@ -331,8 +331,7 @@ define amdgpu_kernel void @test_smul24_i64(i64 addrspace(1)* %out, [8 x i32], i3
; VI-NEXT: s_bfe_i32 s0, s0, 0x180000
; VI-NEXT: v_mov_b32_e32 v0, s1
; VI-NEXT: v_mul_hi_i32_i24_e32 v1, s0, v0
; VI-NEXT: s_mul_i32 s0, s0, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mul_i32_i24_e32 v0, s0, v0
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; VI-NEXT: s_endpgm
;
@ -428,8 +427,7 @@ define amdgpu_kernel void @test_smul24_i64_square(i64 addrspace(1)* %out, i32 %a
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_bfe_i32 s0, s0, 0x180000
; VI-NEXT: v_mul_hi_i32_i24_e64 v1, s0, s0
; VI-NEXT: s_mul_i32 s0, s0, s0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mul_i32_i24_e64 v0, s0, s0
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; VI-NEXT: s_endpgm
;
@ -528,8 +526,7 @@ define amdgpu_kernel void @test_smul24_i33(i64 addrspace(1)* %out, i33 %a, i33 %
; VI-NEXT: s_ashr_i64 s[0:1], s[0:1], 40
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mul_hi_i32_i24_e32 v1, s0, v0
; VI-NEXT: s_mul_i32 s0, s0, s2
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mul_i32_i24_e32 v0, s0, v0
; VI-NEXT: v_lshlrev_b64 v[0:1], 31, v[0:1]
; VI-NEXT: v_ashrrev_i64 v[0:1], 31, v[0:1]
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0

View File

@ -528,15 +528,11 @@ define amdgpu_kernel void @test_umul24_i64(i64 addrspace(1)* %out, i64 %a, i64 %
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s4, 0xffffff
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: s_and_b32 s5, s6, s4
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_and_b32 s4, s7, s4
; VI-NEXT: s_mul_i32 s5, s5, s4
; VI-NEXT: v_mov_b32_e32 v0, s7
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mul_hi_u32_u24_e32 v1, s6, v0
; VI-NEXT: v_mov_b32_e32 v0, s5
; VI-NEXT: v_mul_u32_u24_e32 v0, s6, v0
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
@ -623,10 +619,8 @@ define amdgpu_kernel void @test_umul24_i64_square(i64 addrspace(1)* %out, [8 x i
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_and_b32 s1, s0, 0xffffff
; VI-NEXT: s_mul_i32 s1, s1, s1
; VI-NEXT: v_mul_hi_u32_u24_e64 v1, s0, s0
; VI-NEXT: v_mov_b32_e32 v0, s1
; VI-NEXT: v_mul_u32_u24_e64 v0, s0, s0
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; VI-NEXT: s_endpgm
;
@ -733,17 +727,13 @@ define amdgpu_kernel void @test_umul24_i33(i64 addrspace(1)* %out, i33 %a, i33 %
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
; VI-NEXT: s_load_dword s0, s[0:1], 0x34
; VI-NEXT: s_mov_b32 s1, 0xffffff
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_and_b32 s3, s2, s1
; VI-NEXT: s_and_b32 s1, s0, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: s_mul_i32 s3, s3, s1
; VI-NEXT: v_mul_hi_u32_u24_e32 v0, s2, v0
; VI-NEXT: v_and_b32_e32 v1, 1, v0
; VI-NEXT: v_mov_b32_e32 v0, s3
; VI-NEXT: v_mov_b32_e32 v1, s0
; VI-NEXT: v_mul_u32_u24_e32 v0, s2, v1
; VI-NEXT: v_mul_hi_u32_u24_e32 v1, s2, v1
; VI-NEXT: v_and_b32_e32 v1, 1, v1
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; VI-NEXT: s_endpgm
;

View File

@ -2586,98 +2586,86 @@ define i64 @v_test_udiv64_mulhi_fold(i64 %arg) {
; VI-NEXT: v_mov_b32_e32 v2, 0x4f800000
; VI-NEXT: v_madak_f32 v2, 0, v2, 0x47c35000
; VI-NEXT: v_rcp_f32_e32 v2, v2
; VI-NEXT: s_mov_b32 s4, 0xfffe7960
; VI-NEXT: s_mov_b32 s6, 0xfffe7960
; VI-NEXT: v_mov_b32_e32 v9, 0
; VI-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v2
; VI-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2
; VI-NEXT: v_trunc_f32_e32 v3, v3
; VI-NEXT: v_mac_f32_e32 v2, 0xcf800000, v3
; VI-NEXT: v_cvt_u32_f32_e32 v2, v2
; VI-NEXT: v_cvt_u32_f32_e32 v3, v3
; VI-NEXT: v_mul_hi_u32 v4, v2, s4
; VI-NEXT: v_mul_lo_u32 v5, v3, s4
; VI-NEXT: v_mul_lo_u32 v6, v2, s4
; VI-NEXT: v_subrev_u32_e32 v4, vcc, v2, v4
; VI-NEXT: v_add_u32_e32 v4, vcc, v5, v4
; VI-NEXT: v_mul_lo_u32 v5, v2, v4
; VI-NEXT: v_mul_hi_u32 v7, v2, v6
; VI-NEXT: v_mul_hi_u32 v8, v2, v4
; VI-NEXT: v_mul_hi_u32 v10, v3, v4
; VI-NEXT: v_mul_lo_u32 v4, v3, v4
; VI-NEXT: v_add_u32_e32 v5, vcc, v7, v5
; VI-NEXT: v_addc_u32_e32 v7, vcc, 0, v8, vcc
; VI-NEXT: v_mul_lo_u32 v8, v3, v6
; VI-NEXT: v_mul_hi_u32 v6, v3, v6
; VI-NEXT: v_add_u32_e32 v5, vcc, v5, v8
; VI-NEXT: v_addc_u32_e32 v5, vcc, v7, v6, vcc
; VI-NEXT: v_addc_u32_e32 v6, vcc, v10, v9, vcc
; VI-NEXT: v_add_u32_e32 v4, vcc, v5, v4
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v6, vcc
; VI-NEXT: v_cvt_u32_f32_e32 v6, v2
; VI-NEXT: v_cvt_u32_f32_e32 v7, v3
; VI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, s6, 0
; VI-NEXT: v_mul_lo_u32 v4, v7, s6
; VI-NEXT: v_subrev_u32_e32 v3, vcc, v6, v3
; VI-NEXT: v_add_u32_e32 v5, vcc, v4, v3
; VI-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v6, v5, 0
; VI-NEXT: v_mul_hi_u32 v8, v6, v2
; VI-NEXT: v_add_u32_e32 v8, vcc, v8, v3
; VI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v7, v2, 0
; VI-NEXT: v_addc_u32_e32 v10, vcc, 0, v4, vcc
; VI-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v7, v5, 0
; VI-NEXT: v_add_u32_e32 v2, vcc, v8, v2
; VI-NEXT: v_addc_u32_e32 v2, vcc, v10, v3, vcc
; VI-NEXT: v_addc_u32_e32 v3, vcc, v5, v9, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, v2, v4
; VI-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc
; VI-NEXT: v_mul_hi_u32 v4, v2, s4
; VI-NEXT: v_mul_lo_u32 v5, v3, s4
; VI-NEXT: v_mul_lo_u32 v6, v2, s4
; VI-NEXT: s_mov_b32 s4, 0x186a0
; VI-NEXT: v_subrev_u32_e32 v4, vcc, v2, v4
; VI-NEXT: v_add_u32_e32 v4, vcc, v4, v5
; VI-NEXT: v_mul_lo_u32 v5, v2, v4
; VI-NEXT: v_mul_hi_u32 v7, v2, v6
; VI-NEXT: v_mul_hi_u32 v8, v2, v4
; VI-NEXT: v_mul_hi_u32 v10, v3, v4
; VI-NEXT: v_mul_lo_u32 v4, v3, v4
; VI-NEXT: v_add_u32_e32 v5, vcc, v7, v5
; VI-NEXT: v_addc_u32_e32 v7, vcc, 0, v8, vcc
; VI-NEXT: v_mul_lo_u32 v8, v3, v6
; VI-NEXT: v_mul_hi_u32 v6, v3, v6
; VI-NEXT: v_add_u32_e32 v5, vcc, v5, v8
; VI-NEXT: v_addc_u32_e32 v5, vcc, v7, v6, vcc
; VI-NEXT: v_addc_u32_e32 v6, vcc, v10, v9, vcc
; VI-NEXT: v_add_u32_e32 v4, vcc, v5, v4
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v6, vcc
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v6, vcc, v6, v2
; VI-NEXT: v_addc_u32_e32 v7, vcc, v7, v3, vcc
; VI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, s6, 0
; VI-NEXT: v_mul_lo_u32 v4, v7, s6
; VI-NEXT: s_mov_b32 s6, 0x186a0
; VI-NEXT: v_subrev_u32_e32 v3, vcc, v6, v3
; VI-NEXT: v_add_u32_e32 v5, vcc, v3, v4
; VI-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v6, v5, 0
; VI-NEXT: v_mul_hi_u32 v8, v6, v2
; VI-NEXT: v_add_u32_e32 v8, vcc, v8, v3
; VI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v7, v2, 0
; VI-NEXT: v_addc_u32_e32 v10, vcc, 0, v4, vcc
; VI-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v7, v5, 0
; VI-NEXT: v_add_u32_e32 v2, vcc, v8, v2
; VI-NEXT: v_addc_u32_e32 v2, vcc, v10, v3, vcc
; VI-NEXT: v_addc_u32_e32 v3, vcc, v5, v9, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, v2, v4
; VI-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc
; VI-NEXT: v_mul_lo_u32 v4, v0, v3
; VI-NEXT: v_mul_hi_u32 v5, v0, v2
; VI-NEXT: v_mul_hi_u32 v6, v0, v3
; VI-NEXT: v_mul_hi_u32 v7, v1, v3
; VI-NEXT: v_mul_lo_u32 v3, v1, v3
; VI-NEXT: v_add_u32_e32 v4, vcc, v5, v4
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v6, vcc
; VI-NEXT: v_mul_lo_u32 v6, v1, v2
; VI-NEXT: v_mul_hi_u32 v2, v1, v2
; VI-NEXT: v_add_u32_e32 v4, vcc, v4, v6
; VI-NEXT: v_addc_u32_e32 v2, vcc, v5, v2, vcc
; VI-NEXT: v_addc_u32_e32 v4, vcc, v7, v9, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, v2, v3
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
; VI-NEXT: v_mul_lo_u32 v4, v3, s4
; VI-NEXT: v_mul_hi_u32 v5, v2, s4
; VI-NEXT: v_mul_lo_u32 v6, v2, s4
; VI-NEXT: v_add_u32_e32 v4, vcc, v5, v4
; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v6
; VI-NEXT: v_subb_u32_e32 v1, vcc, v1, v4, vcc
; VI-NEXT: v_subrev_u32_e32 v4, vcc, s4, v0
; VI-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v1, vcc
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v4, vcc, v6, v2
; VI-NEXT: v_addc_u32_e32 v5, vcc, v7, v3, vcc
; VI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, v5, 0
; VI-NEXT: v_mul_hi_u32 v6, v0, v4
; VI-NEXT: v_add_u32_e32 v6, vcc, v6, v2
; VI-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
; VI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, v4, 0
; VI-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, v5, 0
; VI-NEXT: v_add_u32_e32 v2, vcc, v6, v2
; VI-NEXT: v_addc_u32_e32 v2, vcc, v7, v3, vcc
; VI-NEXT: v_addc_u32_e32 v3, vcc, v5, v9, vcc
; VI-NEXT: v_add_u32_e32 v4, vcc, v2, v4
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v3, vcc
; VI-NEXT: v_mul_lo_u32 v6, v5, s6
; VI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v4, s6, 0
; VI-NEXT: s_mov_b32 s4, 0x1869f
; VI-NEXT: v_cmp_lt_u32_e32 vcc, s4, v4
; VI-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; VI-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; VI-NEXT: v_add_u32_e32 v5, vcc, 2, v2
; VI-NEXT: v_addc_u32_e32 v6, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v7, vcc, 1, v2
; VI-NEXT: v_add_u32_e32 v3, vcc, v3, v6
; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v2
; VI-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
; VI-NEXT: v_subrev_u32_e32 v2, vcc, s6, v0
; VI-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v1, vcc
; VI-NEXT: v_cmp_lt_u32_e32 vcc, s4, v2
; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; VI-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc
; VI-NEXT: v_add_u32_e32 v3, vcc, 2, v4
; VI-NEXT: v_addc_u32_e32 v6, vcc, 0, v5, vcc
; VI-NEXT: v_add_u32_e32 v7, vcc, 1, v4
; VI-NEXT: v_cmp_lt_u32_e64 s[4:5], s4, v0
; VI-NEXT: v_addc_u32_e32 v8, vcc, 0, v3, vcc
; VI-NEXT: v_addc_u32_e32 v8, vcc, 0, v5, vcc
; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
; VI-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; VI-NEXT: v_cndmask_b32_e64 v0, -1, v0, s[4:5]
; VI-NEXT: v_cndmask_b32_e32 v4, v7, v5, vcc
; VI-NEXT: v_cndmask_b32_e32 v2, v7, v3, vcc
; VI-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v0
; VI-NEXT: v_cndmask_b32_e32 v1, v8, v6, vcc
; VI-NEXT: v_cndmask_b32_e64 v0, v2, v4, s[4:5]
; VI-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[4:5]
; VI-NEXT: v_cndmask_b32_e64 v0, v4, v2, s[4:5]
; VI-NEXT: v_cndmask_b32_e64 v1, v5, v1, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GCN-LABEL: v_test_udiv64_mulhi_fold:
@ -2686,98 +2674,86 @@ define i64 @v_test_udiv64_mulhi_fold(i64 %arg) {
; GCN-NEXT: v_mov_b32_e32 v2, 0x4f800000
; GCN-NEXT: v_madak_f32 v2, 0, v2, 0x47c35000
; GCN-NEXT: v_rcp_f32_e32 v2, v2
; GCN-NEXT: s_mov_b32 s4, 0xfffe7960
; GCN-NEXT: s_mov_b32 s6, 0xfffe7960
; GCN-NEXT: v_mov_b32_e32 v9, 0
; GCN-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v2
; GCN-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2
; GCN-NEXT: v_trunc_f32_e32 v3, v3
; GCN-NEXT: v_mac_f32_e32 v2, 0xcf800000, v3
; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2
; GCN-NEXT: v_cvt_u32_f32_e32 v3, v3
; GCN-NEXT: v_mul_hi_u32 v4, v2, s4
; GCN-NEXT: v_mul_lo_u32 v5, v3, s4
; GCN-NEXT: v_mul_lo_u32 v6, v2, s4
; GCN-NEXT: v_subrev_u32_e32 v4, vcc, v2, v4
; GCN-NEXT: v_add_u32_e32 v4, vcc, v5, v4
; GCN-NEXT: v_mul_lo_u32 v5, v2, v4
; GCN-NEXT: v_mul_hi_u32 v7, v2, v6
; GCN-NEXT: v_mul_hi_u32 v8, v2, v4
; GCN-NEXT: v_mul_hi_u32 v10, v3, v4
; GCN-NEXT: v_mul_lo_u32 v4, v3, v4
; GCN-NEXT: v_add_u32_e32 v5, vcc, v7, v5
; GCN-NEXT: v_addc_u32_e32 v7, vcc, 0, v8, vcc
; GCN-NEXT: v_mul_lo_u32 v8, v3, v6
; GCN-NEXT: v_mul_hi_u32 v6, v3, v6
; GCN-NEXT: v_add_u32_e32 v5, vcc, v5, v8
; GCN-NEXT: v_addc_u32_e32 v5, vcc, v7, v6, vcc
; GCN-NEXT: v_addc_u32_e32 v6, vcc, v10, v9, vcc
; GCN-NEXT: v_add_u32_e32 v4, vcc, v5, v4
; GCN-NEXT: v_addc_u32_e32 v5, vcc, 0, v6, vcc
; GCN-NEXT: v_cvt_u32_f32_e32 v6, v2
; GCN-NEXT: v_cvt_u32_f32_e32 v7, v3
; GCN-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, s6, 0
; GCN-NEXT: v_mul_lo_u32 v4, v7, s6
; GCN-NEXT: v_subrev_u32_e32 v3, vcc, v6, v3
; GCN-NEXT: v_add_u32_e32 v5, vcc, v4, v3
; GCN-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v6, v5, 0
; GCN-NEXT: v_mul_hi_u32 v8, v6, v2
; GCN-NEXT: v_add_u32_e32 v8, vcc, v8, v3
; GCN-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v7, v2, 0
; GCN-NEXT: v_addc_u32_e32 v10, vcc, 0, v4, vcc
; GCN-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v7, v5, 0
; GCN-NEXT: v_add_u32_e32 v2, vcc, v8, v2
; GCN-NEXT: v_addc_u32_e32 v2, vcc, v10, v3, vcc
; GCN-NEXT: v_addc_u32_e32 v3, vcc, v5, v9, vcc
; GCN-NEXT: v_add_u32_e32 v2, vcc, v2, v4
; GCN-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc
; GCN-NEXT: v_mul_hi_u32 v4, v2, s4
; GCN-NEXT: v_mul_lo_u32 v5, v3, s4
; GCN-NEXT: v_mul_lo_u32 v6, v2, s4
; GCN-NEXT: s_mov_b32 s4, 0x186a0
; GCN-NEXT: v_subrev_u32_e32 v4, vcc, v2, v4
; GCN-NEXT: v_add_u32_e32 v4, vcc, v4, v5
; GCN-NEXT: v_mul_lo_u32 v5, v2, v4
; GCN-NEXT: v_mul_hi_u32 v7, v2, v6
; GCN-NEXT: v_mul_hi_u32 v8, v2, v4
; GCN-NEXT: v_mul_hi_u32 v10, v3, v4
; GCN-NEXT: v_mul_lo_u32 v4, v3, v4
; GCN-NEXT: v_add_u32_e32 v5, vcc, v7, v5
; GCN-NEXT: v_addc_u32_e32 v7, vcc, 0, v8, vcc
; GCN-NEXT: v_mul_lo_u32 v8, v3, v6
; GCN-NEXT: v_mul_hi_u32 v6, v3, v6
; GCN-NEXT: v_add_u32_e32 v5, vcc, v5, v8
; GCN-NEXT: v_addc_u32_e32 v5, vcc, v7, v6, vcc
; GCN-NEXT: v_addc_u32_e32 v6, vcc, v10, v9, vcc
; GCN-NEXT: v_add_u32_e32 v4, vcc, v5, v4
; GCN-NEXT: v_addc_u32_e32 v5, vcc, 0, v6, vcc
; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-NEXT: v_add_u32_e32 v6, vcc, v6, v2
; GCN-NEXT: v_addc_u32_e32 v7, vcc, v7, v3, vcc
; GCN-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, s6, 0
; GCN-NEXT: v_mul_lo_u32 v4, v7, s6
; GCN-NEXT: s_mov_b32 s6, 0x186a0
; GCN-NEXT: v_subrev_u32_e32 v3, vcc, v6, v3
; GCN-NEXT: v_add_u32_e32 v5, vcc, v3, v4
; GCN-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v6, v5, 0
; GCN-NEXT: v_mul_hi_u32 v8, v6, v2
; GCN-NEXT: v_add_u32_e32 v8, vcc, v8, v3
; GCN-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v7, v2, 0
; GCN-NEXT: v_addc_u32_e32 v10, vcc, 0, v4, vcc
; GCN-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v7, v5, 0
; GCN-NEXT: v_add_u32_e32 v2, vcc, v8, v2
; GCN-NEXT: v_addc_u32_e32 v2, vcc, v10, v3, vcc
; GCN-NEXT: v_addc_u32_e32 v3, vcc, v5, v9, vcc
; GCN-NEXT: v_add_u32_e32 v2, vcc, v2, v4
; GCN-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc
; GCN-NEXT: v_mul_lo_u32 v4, v0, v3
; GCN-NEXT: v_mul_hi_u32 v5, v0, v2
; GCN-NEXT: v_mul_hi_u32 v6, v0, v3
; GCN-NEXT: v_mul_hi_u32 v7, v1, v3
; GCN-NEXT: v_mul_lo_u32 v3, v1, v3
; GCN-NEXT: v_add_u32_e32 v4, vcc, v5, v4
; GCN-NEXT: v_addc_u32_e32 v5, vcc, 0, v6, vcc
; GCN-NEXT: v_mul_lo_u32 v6, v1, v2
; GCN-NEXT: v_mul_hi_u32 v2, v1, v2
; GCN-NEXT: v_add_u32_e32 v4, vcc, v4, v6
; GCN-NEXT: v_addc_u32_e32 v2, vcc, v5, v2, vcc
; GCN-NEXT: v_addc_u32_e32 v4, vcc, v7, v9, vcc
; GCN-NEXT: v_add_u32_e32 v2, vcc, v2, v3
; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GCN-NEXT: v_mul_lo_u32 v4, v3, s4
; GCN-NEXT: v_mul_hi_u32 v5, v2, s4
; GCN-NEXT: v_mul_lo_u32 v6, v2, s4
; GCN-NEXT: v_add_u32_e32 v4, vcc, v5, v4
; GCN-NEXT: v_sub_u32_e32 v0, vcc, v0, v6
; GCN-NEXT: v_subb_u32_e32 v1, vcc, v1, v4, vcc
; GCN-NEXT: v_subrev_u32_e32 v4, vcc, s4, v0
; GCN-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v1, vcc
; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-NEXT: v_add_u32_e32 v4, vcc, v6, v2
; GCN-NEXT: v_addc_u32_e32 v5, vcc, v7, v3, vcc
; GCN-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v0, v5, 0
; GCN-NEXT: v_mul_hi_u32 v6, v0, v4
; GCN-NEXT: v_add_u32_e32 v6, vcc, v6, v2
; GCN-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
; GCN-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, v4, 0
; GCN-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, v5, 0
; GCN-NEXT: v_add_u32_e32 v2, vcc, v6, v2
; GCN-NEXT: v_addc_u32_e32 v2, vcc, v7, v3, vcc
; GCN-NEXT: v_addc_u32_e32 v3, vcc, v5, v9, vcc
; GCN-NEXT: v_add_u32_e32 v4, vcc, v2, v4
; GCN-NEXT: v_addc_u32_e32 v5, vcc, 0, v3, vcc
; GCN-NEXT: v_mul_lo_u32 v6, v5, s6
; GCN-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v4, s6, 0
; GCN-NEXT: s_mov_b32 s4, 0x1869f
; GCN-NEXT: v_cmp_lt_u32_e32 vcc, s4, v4
; GCN-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; GCN-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN-NEXT: v_add_u32_e32 v5, vcc, 2, v2
; GCN-NEXT: v_addc_u32_e32 v6, vcc, 0, v3, vcc
; GCN-NEXT: v_add_u32_e32 v7, vcc, 1, v2
; GCN-NEXT: v_add_u32_e32 v3, vcc, v3, v6
; GCN-NEXT: v_sub_u32_e32 v0, vcc, v0, v2
; GCN-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
; GCN-NEXT: v_subrev_u32_e32 v2, vcc, s6, v0
; GCN-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v1, vcc
; GCN-NEXT: v_cmp_lt_u32_e32 vcc, s4, v2
; GCN-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; GCN-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc
; GCN-NEXT: v_add_u32_e32 v3, vcc, 2, v4
; GCN-NEXT: v_addc_u32_e32 v6, vcc, 0, v5, vcc
; GCN-NEXT: v_add_u32_e32 v7, vcc, 1, v4
; GCN-NEXT: v_cmp_lt_u32_e64 s[4:5], s4, v0
; GCN-NEXT: v_addc_u32_e32 v8, vcc, 0, v3, vcc
; GCN-NEXT: v_addc_u32_e32 v8, vcc, 0, v5, vcc
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
; GCN-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1
; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; GCN-NEXT: v_cndmask_b32_e64 v0, -1, v0, s[4:5]
; GCN-NEXT: v_cndmask_b32_e32 v4, v7, v5, vcc
; GCN-NEXT: v_cndmask_b32_e32 v2, v7, v3, vcc
; GCN-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v0
; GCN-NEXT: v_cndmask_b32_e32 v1, v8, v6, vcc
; GCN-NEXT: v_cndmask_b32_e64 v0, v2, v4, s[4:5]
; GCN-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[4:5]
; GCN-NEXT: v_cndmask_b32_e64 v0, v4, v2, s[4:5]
; GCN-NEXT: v_cndmask_b32_e64 v1, v5, v1, s[4:5]
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX1030-LABEL: v_test_udiv64_mulhi_fold:
@ -2810,50 +2786,46 @@ define i64 @v_test_udiv64_mulhi_fold(i64 %arg) {
; GFX1030-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, 0, v9, vcc_lo
; GFX1030-NEXT: v_add_co_u32 v3, vcc_lo, v4, v3
; GFX1030-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, 0, v5, vcc_lo
; GFX1030-NEXT: v_add_co_u32 v2, vcc_lo, v2, v3
; GFX1030-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, s4, v4, vcc_lo
; GFX1030-NEXT: v_add_co_u32 v5, vcc_lo, v2, v3
; GFX1030-NEXT: v_add_co_ci_u32_e32 v6, vcc_lo, s4, v4, vcc_lo
; GFX1030-NEXT: v_mul_hi_u32 v8, v0, v5
; GFX1030-NEXT: v_mad_u64_u32 v[4:5], s4, v1, v5, 0
; GFX1030-NEXT: v_mad_u64_u32 v[2:3], s4, v0, v6, 0
; GFX1030-NEXT: v_mad_u64_u32 v[6:7], s4, v1, v6, 0
; GFX1030-NEXT: s_mov_b32 s4, 0x186a0
; GFX1030-NEXT: v_mul_hi_u32 v4, v0, v2
; GFX1030-NEXT: v_mul_hi_u32 v7, v1, v2
; GFX1030-NEXT: v_mul_lo_u32 v5, v0, v3
; GFX1030-NEXT: v_mul_hi_u32 v6, v0, v3
; GFX1030-NEXT: v_mul_lo_u32 v2, v1, v2
; GFX1030-NEXT: v_mul_hi_u32 v8, v1, v3
; GFX1030-NEXT: v_mul_lo_u32 v3, v1, v3
; GFX1030-NEXT: v_add_co_u32 v4, vcc_lo, v4, v5
; GFX1030-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, 0, v6, vcc_lo
; GFX1030-NEXT: v_add_co_u32 v2, vcc_lo, v4, v2
; GFX1030-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, v5, v7, vcc_lo
; GFX1030-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, 0, v8, vcc_lo
; GFX1030-NEXT: v_add_co_u32 v2, vcc_lo, v2, v3
; GFX1030-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v4, vcc_lo
; GFX1030-NEXT: v_mul_hi_u32 v4, v2, s4
; GFX1030-NEXT: v_mul_lo_u32 v6, v2, s4
; GFX1030-NEXT: v_mul_lo_u32 v5, v3, s4
; GFX1030-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v6
; GFX1030-NEXT: v_add_nc_u32_e32 v4, v4, v5
; GFX1030-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v4, vcc_lo
; GFX1030-NEXT: v_sub_co_u32 v4, vcc_lo, v0, s4
; GFX1030-NEXT: v_add_co_u32 v2, vcc_lo, v8, v2
; GFX1030-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
; GFX1030-NEXT: v_add_co_u32 v2, vcc_lo, v2, v4
; GFX1030-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, v3, v5, vcc_lo
; GFX1030-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v7, vcc_lo
; GFX1030-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6
; GFX1030-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, 0, v3, vcc_lo
; GFX1030-NEXT: v_mad_u64_u32 v[2:3], s5, v4, s4, 0
; GFX1030-NEXT: v_mul_lo_u32 v6, v5, s4
; GFX1030-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
; GFX1030-NEXT: v_add_nc_u32_e32 v3, v3, v6
; GFX1030-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
; GFX1030-NEXT: v_sub_co_u32 v2, vcc_lo, v0, s4
; GFX1030-NEXT: s_mov_b32 s4, 0x1869f
; GFX1030-NEXT: v_subrev_co_ci_u32_e32 v5, vcc_lo, 0, v1, vcc_lo
; GFX1030-NEXT: v_cmp_lt_u32_e32 vcc_lo, s4, v4
; GFX1030-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc_lo
; GFX1030-NEXT: v_add_co_u32 v6, vcc_lo, v2, 2
; GFX1030-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, 0, v3, vcc_lo
; GFX1030-NEXT: v_subrev_co_ci_u32_e32 v3, vcc_lo, 0, v1, vcc_lo
; GFX1030-NEXT: v_cmp_lt_u32_e32 vcc_lo, s4, v2
; GFX1030-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc_lo
; GFX1030-NEXT: v_add_co_u32 v6, vcc_lo, v4, 2
; GFX1030-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, 0, v5, vcc_lo
; GFX1030-NEXT: v_cmp_lt_u32_e32 vcc_lo, s4, v0
; GFX1030-NEXT: v_cmp_eq_u32_e64 s4, 0, v1
; GFX1030-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX1030-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v5
; GFX1030-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
; GFX1030-NEXT: v_cndmask_b32_e64 v0, -1, v0, s4
; GFX1030-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1030-NEXT: v_add_co_u32 v5, vcc_lo, v2, 1
; GFX1030-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v3, vcc_lo
; GFX1030-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
; GFX1030-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc_lo
; GFX1030-NEXT: v_cndmask_b32_e32 v4, v8, v7, vcc_lo
; GFX1030-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
; GFX1030-NEXT: v_add_co_u32 v3, vcc_lo, v4, 1
; GFX1030-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v5, vcc_lo
; GFX1030-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
; GFX1030-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc_lo
; GFX1030-NEXT: v_cndmask_b32_e32 v2, v8, v7, vcc_lo
; GFX1030-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX1030-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc_lo
; GFX1030-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
; GFX1030-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc_lo
; GFX1030-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc_lo
; GFX1030-NEXT: s_setpc_b64 s[30:31]
;
; EG-LABEL: v_test_udiv64_mulhi_fold:

View File

@ -433,39 +433,40 @@ define amdgpu_gfx i64 @strict_wwm_called_i64(i64 %a) noinline {
; GFX9-O0-LABEL: strict_wwm_called_i64:
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-O0-NEXT: v_mov_b32_e32 v6, v0
; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v7, v1
; GFX9-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr6_vgpr7 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v6
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v7
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
; GFX9-O0-NEXT: v_add_co_u32_e64 v4, s[34:35], v2, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v0
; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
; GFX9-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr2_vgpr3 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
; GFX9-O0-NEXT: v_add_co_u32_e64 v4, s[34:35], v4, v5
; GFX9-O0-NEXT: v_addc_co_u32_e64 v0, s[34:35], v0, v1, s[34:35]
; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v0
; GFX9-O0-NEXT: s_mov_b32 s34, 32
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v6
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s34, v[0:1]
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v4
; GFX9-O0-NEXT: v_mul_lo_u32 v2, v0, v1
; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
; GFX9-O0-NEXT: v_mul_hi_u32 v1, v0, v6
; GFX9-O0-NEXT: v_lshrrev_b64 v[7:8], s34, v[4:5]
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v7
; GFX9-O0-NEXT: v_mul_lo_u32 v3, v3, v6
; GFX9-O0-NEXT: v_add3_u32 v1, v1, v2, v3
; GFX9-O0-NEXT: v_mul_lo_u32 v1, v0, v1
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], s34, v[4:5]
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v6
; GFX9-O0-NEXT: v_mul_lo_u32 v2, v2, v3
; GFX9-O0-NEXT: v_mad_u64_u32 v[6:7], s[36:37], v0, v3, 0
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v7
; GFX9-O0-NEXT: v_add3_u32 v0, v0, v1, v2
; GFX9-O0-NEXT: ; implicit-def: $sgpr35
; GFX9-O0-NEXT: ; implicit-def: $sgpr36
; GFX9-O0-NEXT: v_mov_b32_e32 v3, s35
; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v3
; GFX9-O0-NEXT: v_lshlrev_b64 v[1:2], s34, v[1:2]
; GFX9-O0-NEXT: v_mov_b32_e32 v2, s35
; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
; GFX9-O0-NEXT: v_lshlrev_b64 v[1:2], s34, v[0:1]
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
; GFX9-O0-NEXT: v_mul_lo_u32 v6, v0, v6
; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
; GFX9-O0-NEXT: s_mov_b32 s35, 0
; GFX9-O0-NEXT: v_mov_b32_e32 v0, 0
; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
@ -496,10 +497,9 @@ define amdgpu_gfx i64 @strict_wwm_called_i64(i64 %a) noinline {
; GFX9-O3-NEXT: v_add_co_u32_e32 v2, vcc, v0, v0
; GFX9-O3-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v1, vcc
; GFX9-O3-NEXT: v_mul_lo_u32 v4, v3, v0
; GFX9-O3-NEXT: v_mul_lo_u32 v1, v2, v1
; GFX9-O3-NEXT: v_mul_hi_u32 v5, v2, v0
; GFX9-O3-NEXT: v_mul_lo_u32 v0, v2, v0
; GFX9-O3-NEXT: v_add3_u32 v1, v5, v1, v4
; GFX9-O3-NEXT: v_mul_lo_u32 v5, v2, v1
; GFX9-O3-NEXT: v_mad_u64_u32 v[0:1], s[34:35], v2, v0, 0
; GFX9-O3-NEXT: v_add3_u32 v1, v1, v5, v4
; GFX9-O3-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
; GFX9-O3-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
; GFX9-O3-NEXT: s_setpc_b64 s[30:31]
@ -514,10 +514,10 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-O0-NEXT: s_or_saveexec_b64 s[34:35], -1
; GFX9-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; GFX9-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
@ -529,40 +529,40 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
; GFX9-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
; GFX9-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_mov_b64 exec, s[34:35]
; GFX9-O0-NEXT: v_writelane_b32 v11, s33, 8
; GFX9-O0-NEXT: v_writelane_b32 v10, s33, 8
; GFX9-O0-NEXT: s_mov_b32 s33, s32
; GFX9-O0-NEXT: s_add_i32 s32, s32, 0xc00
; GFX9-O0-NEXT: v_writelane_b32 v11, s30, 0
; GFX9-O0-NEXT: v_writelane_b32 v11, s31, 1
; GFX9-O0-NEXT: v_writelane_b32 v10, s30, 0
; GFX9-O0-NEXT: v_writelane_b32 v10, s31, 1
; GFX9-O0-NEXT: s_mov_b32 s34, s8
; GFX9-O0-NEXT: s_mov_b32 s36, s4
; GFX9-O0-NEXT: ; kill: def $sgpr36 killed $sgpr36 def $sgpr36_sgpr37_sgpr38_sgpr39
; GFX9-O0-NEXT: s_mov_b32 s37, s5
; GFX9-O0-NEXT: s_mov_b32 s38, s6
; GFX9-O0-NEXT: s_mov_b32 s39, s7
; GFX9-O0-NEXT: v_writelane_b32 v11, s36, 2
; GFX9-O0-NEXT: v_writelane_b32 v11, s37, 3
; GFX9-O0-NEXT: v_writelane_b32 v11, s38, 4
; GFX9-O0-NEXT: v_writelane_b32 v11, s39, 5
; GFX9-O0-NEXT: v_writelane_b32 v10, s36, 2
; GFX9-O0-NEXT: v_writelane_b32 v10, s37, 3
; GFX9-O0-NEXT: v_writelane_b32 v10, s38, 4
; GFX9-O0-NEXT: v_writelane_b32 v10, s39, 5
; GFX9-O0-NEXT: ; kill: def $sgpr34 killed $sgpr34 def $sgpr34_sgpr35
; GFX9-O0-NEXT: s_mov_b32 s35, s9
; GFX9-O0-NEXT: ; kill: def $sgpr30_sgpr31 killed $sgpr34_sgpr35
; GFX9-O0-NEXT: s_mov_b64 s[30:31], 0
; GFX9-O0-NEXT: v_mov_b32_e32 v0, s34
; GFX9-O0-NEXT: v_mov_b32_e32 v1, s35
; GFX9-O0-NEXT: v_mov_b32_e32 v10, v1
; GFX9-O0-NEXT: v_mov_b32_e32 v9, v0
; GFX9-O0-NEXT: v_mov_b32_e32 v9, v1
; GFX9-O0-NEXT: v_mov_b32_e32 v8, v0
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v9, s30
; GFX9-O0-NEXT: v_mov_b32_e32 v10, s31
; GFX9-O0-NEXT: v_mov_b32_e32 v8, s30
; GFX9-O0-NEXT: v_mov_b32_e32 v9, s31
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: s_or_saveexec_b64 s[30:31], -1
; GFX9-O0-NEXT: v_writelane_b32 v11, s30, 6
; GFX9-O0-NEXT: v_writelane_b32 v11, s31, 7
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v9
; GFX9-O0-NEXT: v_writelane_b32 v10, s30, 6
; GFX9-O0-NEXT: v_writelane_b32 v10, s31, 7
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v8
; GFX9-O0-NEXT: s_mov_b32 s30, 32
; GFX9-O0-NEXT: ; implicit-def: $sgpr34_sgpr35
; GFX9-O0-NEXT: v_lshrrev_b64 v[3:4], s30, v[9:10]
; GFX9-O0-NEXT: v_lshrrev_b64 v[3:4], s30, v[8:9]
; GFX9-O0-NEXT: s_getpc_b64 s[30:31]
; GFX9-O0-NEXT: s_add_u32 s30, s30, strict_wwm_called_i64@gotpcrel32@lo+4
; GFX9-O0-NEXT: s_addc_u32 s31, s31, strict_wwm_called_i64@gotpcrel32@hi+12
@ -575,18 +575,18 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
; GFX9-O0-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[30:31]
; GFX9-O0-NEXT: v_readlane_b32 s34, v11, 6
; GFX9-O0-NEXT: v_readlane_b32 s35, v11, 7
; GFX9-O0-NEXT: v_readlane_b32 s36, v11, 2
; GFX9-O0-NEXT: v_readlane_b32 s37, v11, 3
; GFX9-O0-NEXT: v_readlane_b32 s38, v11, 4
; GFX9-O0-NEXT: v_readlane_b32 s39, v11, 5
; GFX9-O0-NEXT: v_readlane_b32 s30, v11, 0
; GFX9-O0-NEXT: v_readlane_b32 s31, v11, 1
; GFX9-O0-NEXT: v_readlane_b32 s34, v10, 6
; GFX9-O0-NEXT: v_readlane_b32 s35, v10, 7
; GFX9-O0-NEXT: v_readlane_b32 s36, v10, 2
; GFX9-O0-NEXT: v_readlane_b32 s37, v10, 3
; GFX9-O0-NEXT: v_readlane_b32 s38, v10, 4
; GFX9-O0-NEXT: v_readlane_b32 s39, v10, 5
; GFX9-O0-NEXT: v_readlane_b32 s30, v10, 0
; GFX9-O0-NEXT: v_readlane_b32 s31, v10, 1
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v0
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v9
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v10
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v8
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v9
; GFX9-O0-NEXT: v_add_co_u32_e64 v2, s[40:41], v2, v4
; GFX9-O0-NEXT: v_addc_co_u32_e64 v3, s[40:41], v3, v5, s[40:41]
; GFX9-O0-NEXT: s_mov_b64 exec, s[34:35]
@ -595,13 +595,13 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
; GFX9-O0-NEXT: s_mov_b32 s34, 0
; GFX9-O0-NEXT: buffer_store_dwordx2 v[0:1], off, s[36:39], s34 offset:4
; GFX9-O0-NEXT: s_add_i32 s32, s32, 0xfffff400
; GFX9-O0-NEXT: v_readlane_b32 s33, v11, 8
; GFX9-O0-NEXT: v_readlane_b32 s33, v10, 8
; GFX9-O0-NEXT: s_or_saveexec_b64 s[34:35], -1
; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_nop 0
; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 ; 4-byte Folded Reload
; GFX9-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_nop 0
; GFX9-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
; GFX9-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_nop 0
; GFX9-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_nop 0
@ -634,7 +634,7 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
; GFX9-O3-NEXT: s_waitcnt vmcnt(0)
; GFX9-O3-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; GFX9-O3-NEXT: s_mov_b64 exec, s[34:35]
; GFX9-O3-NEXT: s_mov_b32 s38, s33
; GFX9-O3-NEXT: s_mov_b32 s40, s33
; GFX9-O3-NEXT: s_mov_b32 s33, s32
; GFX9-O3-NEXT: s_addk_i32 s32, 0x800
; GFX9-O3-NEXT: s_mov_b64 s[36:37], s[30:31]
@ -644,7 +644,7 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
; GFX9-O3-NEXT: v_mov_b32_e32 v6, 0
; GFX9-O3-NEXT: v_mov_b32_e32 v7, 0
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: s_or_saveexec_b64 s[34:35], -1
; GFX9-O3-NEXT: s_or_saveexec_b64 s[38:39], -1
; GFX9-O3-NEXT: s_getpc_b64 s[30:31]
; GFX9-O3-NEXT: s_add_u32 s30, s30, strict_wwm_called_i64@gotpcrel32@lo+4
; GFX9-O3-NEXT: s_addc_u32 s31, s31, strict_wwm_called_i64@gotpcrel32@hi+12
@ -657,12 +657,12 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
; GFX9-O3-NEXT: v_mov_b32_e32 v3, v1
; GFX9-O3-NEXT: v_add_co_u32_e32 v2, vcc, v2, v6
; GFX9-O3-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v7, vcc
; GFX9-O3-NEXT: s_mov_b64 exec, s[34:35]
; GFX9-O3-NEXT: s_mov_b64 exec, s[38:39]
; GFX9-O3-NEXT: v_mov_b32_e32 v0, v2
; GFX9-O3-NEXT: v_mov_b32_e32 v1, v3
; GFX9-O3-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 offset:4
; GFX9-O3-NEXT: s_addk_i32 s32, 0xf800
; GFX9-O3-NEXT: s_mov_b32 s33, s38
; GFX9-O3-NEXT: s_mov_b32 s33, s40
; GFX9-O3-NEXT: s_or_saveexec_b64 s[30:31], -1
; GFX9-O3-NEXT: buffer_load_dword v6, off, s[0:3], s32 ; 4-byte Folded Reload
; GFX9-O3-NEXT: s_nop 0

View File

@ -144,15 +144,15 @@ define amdgpu_kernel void @call_i64(<4 x i32> inreg %tmp14, i64 inreg %arg) {
; GFX9-O0: s_mov_b64 s{{\[}}[[ZERO_LO:[0-9]+]]:[[ZERO_HI:[0-9]+]]{{\]}}, 0{{$}}
; GFX9-O0: v_mov_b32_e32 v0, s[[ARG_LO]]
; GFX9-O0: v_mov_b32_e32 v1, s[[ARG_HI]]
; GFX9-O0-DAG: v_mov_b32_e32 v10, v1
; GFX9-O0-DAG: v_mov_b32_e32 v9, v0
; GFX9-O0-DAG: v_mov_b32_e32 v9, v1
; GFX9-O0-DAG: v_mov_b32_e32 v8, v0
; GFX9-O3-DAG: v_mov_b32_e32 v7, s[[ARG_HI]]
; GFX9-O3-DAG: v_mov_b32_e32 v6, s[[ARG_LO]]
; GFX9: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v9, s[[ZERO_LO]]
; GFX9-O0-NEXT: v_mov_b32_e32 v10, s[[ZERO_HI]]
; GFX9-O0-NEXT: v_mov_b32_e32 v8, s[[ZERO_LO]]
; GFX9-O0-NEXT: v_mov_b32_e32 v9, s[[ZERO_HI]]
; GFX9-O3-NEXT: v_mov_b32_e32 v6, 0
; GFX9-O3-NEXT: v_mov_b32_e32 v7, 0
; GFX9-NEXT: s_not_b64 exec, exec
@ -338,15 +338,15 @@ define amdgpu_kernel void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg
; GFX9-O0: s_mov_b64 s{{\[}}[[ZERO_LO:[0-9]+]]:[[ZERO_HI:[0-9]+]]{{\]}}, 0{{$}}
; GFX9-O0: v_mov_b32_e32 v0, s[[ARG_LO]]
; GFX9-O0: v_mov_b32_e32 v1, s[[ARG_HI]]
; GFX9-O0-DAG: v_mov_b32_e32 v10, v1
; GFX9-O0-DAG: v_mov_b32_e32 v9, v0
; GFX9-O0-DAG: v_mov_b32_e32 v9, v1
; GFX9-O0-DAG: v_mov_b32_e32 v8, v0
; GFX9-O3-DAG: v_mov_b32_e32 v7, s[[ARG_HI]]
; GFX9-O3-DAG: v_mov_b32_e32 v6, s[[ARG_LO]]
; GFX9: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v9, s[[ZERO_LO]]
; GFX9-O0-NEXT: v_mov_b32_e32 v10, s[[ZERO_HI]]
; GFX9-O0-NEXT: v_mov_b32_e32 v8, s[[ZERO_LO]]
; GFX9-O0-NEXT: v_mov_b32_e32 v9, s[[ZERO_HI]]
; GFX9-O3-NEXT: v_mov_b32_e32 v6, 0
; GFX9-O3-NEXT: v_mov_b32_e32 v7, 0
; GFX9-NEXT: s_not_b64 exec, exec