[TargetLowering] Add SimplifyMultipleUseDemandedBits

This patch introduces the DAG version of SimplifyMultipleUseDemandedBits, which attempts to peek through ops (mainly and/or/xor so far) that don't contribute to the demanded bits/elts of a node. This lets us simplify even when an op has multiple uses, which would normally require us to demand all bits/elts. The intention is to remove a similar helper - SelectionDAG::GetDemandedBits - once SimplifyMultipleUseDemandedBits has matured.

The InstCombine version of SimplifyMultipleUseDemandedBits can also constant fold, which I haven't added here yet; so far I've only wired this up to some basic binops (and/or/xor/add/sub/mul) to demonstrate its use.
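
To make the rule concrete, here is a minimal, self-contained sketch of the AND peek-through check, using plain uint64_t masks in place of APInt/KnownBits (the struct and helper names are invented for illustration and are not part of the patch):

#include <cstdint>
#include <cstdio>

// Bits known to be 0 / known to be 1 in a value (a stand-in for KnownBits).
struct Known { uint64_t Zero, One; };

// For (and LHS, RHS): every demanded bit that is known zero in LHS or known
// one in RHS is unaffected by the AND, so LHS can be returned directly - and
// because no nodes are modified or created, this is safe even when the AND
// has other uses.
static bool andActsLikeLHS(uint64_t DemandedBits, Known LHS, Known RHS) {
  return (DemandedBits & ~(LHS.Zero | RHS.One)) == 0;
}

int main() {
  Known X = {0, 0};                 // nothing known about x
  Known Mask = {~0xFFull, 0xFFull}; // the constant 0xFF
  // Only the low byte of (x & 0xFF) is demanded, so the AND can be peeked
  // through to x itself.
  std::printf("%d\n", andActsLikeLHS(0xFF, X, Mask)); // prints 1
}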

We do see a couple of regressions that need to be addressed:

    AMDGPU unsigned dot product codegen retains an AND mask (for ZERO_EXTEND) that it previously removed (but otherwise the dot product codegen is a lot better).

    X86/AVX2 has poor handling of vector ANY_EXTEND/ANY_EXTEND_VECTOR_INREG - it prematurely gets converted to ZERO_EXTEND_VECTOR_INREG.

The code owners have confirmed it's OK for these cases to be fixed up in future patches.

Differential Revision: https://reviews.llvm.org/D63281

llvm-svn: 366799
Simon Pilgrim 2019-07-23 12:39:08 +00:00
parent 7c35db0865
commit 743d45ee25
14 changed files with 1068 additions and 1228 deletions


@ -3065,6 +3065,14 @@ public:
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
                            DAGCombinerInfo &DCI) const;

  /// More limited version of SimplifyDemandedBits that can be used to "look
  /// through" ops that don't contribute to the DemandedBits/DemandedElts -
  /// bitwise ops etc.
  SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          const APInt &DemandedElts,
                                          SelectionDAG &DAG,
                                          unsigned Depth) const;

  /// Look at Vector Op. At this point, we know that only the DemandedElts
  /// elements of the result of Op are ever used downstream. If we can use
  /// this information to simplify Op, create a new simplified DAG node and
@ -3139,6 +3147,13 @@ public:
                                                 TargetLoweringOpt &TLO,
                                                 unsigned Depth = 0) const;

  /// More limited version of SimplifyDemandedBits that can be used to "look
  /// through" ops that don't contribute to the DemandedBits/DemandedElts -
  /// bitwise ops etc.
  virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
      SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
      SelectionDAG &DAG, unsigned Depth) const;

  /// This method returns the constant pool value that will be loaded by LD.
  /// NOTE: You must check for implicit extensions of the constant by LD.
  virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const;


@ -564,6 +564,61 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                              AssumeSingleUse);
}

// TODO: Can we merge SelectionDAG::GetDemandedBits into this?
// TODO: Under what circumstances can we create nodes? BITCAST? Constant?
SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  KnownBits LHSKnown, RHSKnown;
  switch (Op.getOpcode()) {
  case ISD::AND: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return Op.getOperand(1);
    break;
  }
  case ISD::OR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::XOR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if (DemandedBits.isSubsetOf(RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  default:
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
      if (SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
              Op, DemandedBits, DemandedElts, DAG, Depth))
        return V;
    break;
  }
  return SDValue();
}
/// Look at Op. At this point, we know that only the OriginalDemandedBits of the
/// result of Op are ever used downstream. If we can use this information to
/// simplify Op, create a new simplified DAG node and return true, returning the
@ -834,6 +889,20 @@ bool TargetLowering::SimplifyDemandedBits(
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if (DemandedBits.isSubsetOf(Known2.Zero | Known.One))
@ -869,6 +938,20 @@ bool TargetLowering::SimplifyDemandedBits(
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'or'.
    if (DemandedBits.isSubsetOf(Known2.One | Known.Zero))
@ -901,6 +984,20 @@ bool TargetLowering::SimplifyDemandedBits(
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if (DemandedBits.isSubsetOf(Known.Zero))
@ -1663,6 +1760,7 @@ bool TargetLowering::SimplifyDemandedBits(
    // Add, Sub, and Mul don't demand any bits in positions beyond that
    // of the highest bit demanded of them.
    SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
    SDNodeFlags Flags = Op.getNode()->getFlags();
    unsigned DemandedBitsLZ = DemandedBits.countLeadingZeros();
    APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ);
    if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, Known2, TLO,
@ -1671,7 +1769,6 @@ bool TargetLowering::SimplifyDemandedBits(
                             Depth + 1) ||
        // See if the operation should be performed at a smaller bit width.
        ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) {
      SDNodeFlags Flags = Op.getNode()->getFlags();
      if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
        // Disable the nsw and nuw flags. We can no longer guarantee that we
        // won't wrap after simplification.
@ -1684,6 +1781,23 @@ bool TargetLowering::SimplifyDemandedBits(
      return true;
    }

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!LoMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, LoMask, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Flags.setNoSignedWrap(false);
        Flags.setNoUnsignedWrap(false);
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp =
            TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If we have a constant operand, we may be able to turn it into -1 if we
    // do not demand the high bits. This can make the constant smaller to
    // encode, allow more general folding, or match specialized instruction
@ -2357,6 +2471,19 @@ bool TargetLowering::SimplifyDemandedBitsForTargetNode(
  return false;
}

SDValue TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  assert(
      (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
       Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
       Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
       Op.getOpcode() == ISD::INTRINSIC_VOID) &&
      "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op"
      " is a target node!");
  return SDValue();
}

const Constant *TargetLowering::getTargetConstantFromLoad(LoadSDNode*) const {
  return nullptr;
}
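
As an illustration of the new hook, here is a rough sketch of a backend override; the target class FooTargetLowering and the node FooISD::MERGELO (which takes its low 16 bits from operand 0 and the rest from operand 1) are hypothetical, and the override simply mirrors the look-through pattern used above:

SDValue FooTargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  switch (Op.getOpcode()) {
  case FooISD::MERGELO: {
    // MERGELO(X, Y) takes bits [15:0] from X and the remaining bits from Y.
    // If nothing above bit 15 is demanded, the node acts exactly like X on
    // every demanded bit, so we can look through it without creating nodes.
    APInt LoBits = APInt::getLowBitsSet(DemandedBits.getBitWidth(), 16);
    if (DemandedBits.isSubsetOf(LoBits))
      return Op.getOperand(0);
    break;
  }
  }
  return TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
      Op, DemandedBits, DemandedElts, DAG, Depth);
}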


@ -4308,6 +4308,7 @@ bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL,
      resetSchedule();
      initialFillReadyList(ReadyInsts);
    }

    assert(Bundle && "Failed to find schedule bundle");
    LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
                      << BB->getName() << "\n");


@ -265,8 +265,7 @@ define void @test_32bit_opnd1_better(i32* %existing, i32* %new) {
define i32 @test_nouseful_bits(i8 %a, i32 %b) {
; CHECK-LABEL: test_nouseful_bits:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: bfi w8, w8, #8, #24
; CHECK-NEXT: orr w8, w0, w8, lsl #8
; CHECK-NEXT: mov w9, w0
; CHECK-NEXT: bfi w9, w8, #8, #24
; CHECK-NEXT: bfi w0, w9, #8, #24


@ -899,41 +899,28 @@ define amdgpu_kernel void @idot4_acc16_vecMul(<4 x i8> addrspace(1)* %src1,
; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; GFX7-NEXT: s_mov_b32 s3, 0xf000
; GFX7-NEXT: s_mov_b32 s2, -1
; GFX7-NEXT: s_mov_b32 s8, 0xffff
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_load_dword s4, s[4:5], 0x0
; GFX7-NEXT: buffer_load_ushort v0, off, s[0:3], 0
; GFX7-NEXT: s_load_dword s5, s[6:7], 0x0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_sext_i32_i8 s6, s4
; GFX7-NEXT: s_bfe_i32 s7, s4, 0x80008
; GFX7-NEXT: s_sext_i32_i8 s10, s5
; GFX7-NEXT: s_ashr_i32 s6, s4, 24
; GFX7-NEXT: s_bfe_i32 s7, s4, 0x80010
; GFX7-NEXT: s_bfe_i32 s10, s5, 0x80010
; GFX7-NEXT: s_bfe_i32 s11, s5, 0x80008
; GFX7-NEXT: s_bfe_i32 s12, s5, 0x80010
; GFX7-NEXT: s_ashr_i32 s5, s5, 24
; GFX7-NEXT: v_mov_b32_e32 v3, s11
; GFX7-NEXT: v_mov_b32_e32 v4, s10
; GFX7-NEXT: s_bfe_i32 s9, s4, 0x80010
; GFX7-NEXT: v_mov_b32_e32 v2, s12
; GFX7-NEXT: s_ashr_i32 s4, s4, 24
; GFX7-NEXT: s_ashr_i32 s9, s5, 24
; GFX7-NEXT: s_sext_i32_i8 s5, s5
; GFX7-NEXT: s_bfe_i32 s8, s4, 0x80008
; GFX7-NEXT: s_sext_i32_i8 s4, s4
; GFX7-NEXT: v_mov_b32_e32 v1, s5
; GFX7-NEXT: v_mul_i32_i24_e32 v1, s4, v1
; GFX7-NEXT: v_mul_i32_i24_e32 v2, s9, v2
; GFX7-NEXT: v_mul_i32_i24_e32 v3, s7, v3
; GFX7-NEXT: v_mul_i32_i24_e32 v4, s6, v4
; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX7-NEXT: v_and_b32_e32 v2, s8, v2
; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX7-NEXT: v_and_b32_e32 v4, s8, v4
; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
; GFX7-NEXT: v_or_b32_e32 v2, v4, v3
; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v1
; GFX7-NEXT: v_mov_b32_e32 v2, s11
; GFX7-NEXT: v_mov_b32_e32 v3, s10
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v3, v0
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v4, v0
; GFX7-NEXT: v_mad_i32_i24 v0, s4, v1, v0
; GFX7-NEXT: v_mad_i32_i24 v0, s8, v2, v0
; GFX7-NEXT: v_mad_i32_i24 v0, s7, v3, v0
; GFX7-NEXT: v_mov_b32_e32 v1, s9
; GFX7-NEXT: v_mad_i32_i24 v0, s6, v1, v0
; GFX7-NEXT: buffer_store_short v0, off, s[0:3], 0
; GFX7-NEXT: s_endpgm
;


@ -1802,33 +1802,23 @@ define amdgpu_kernel void @udot4_acc16_vecMul(<4 x i8> addrspace(1)* %src1,
; GFX7-NEXT: buffer_load_ushort v0, off, s[0:3], 0
; GFX7-NEXT: s_load_dword s5, s[6:7], 0x0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_and_b32 s11, s4, s8
; GFX7-NEXT: s_bfe_u32 s6, s4, 0x80008
; GFX7-NEXT: s_bfe_u32 s9, s5, 0x80008
; GFX7-NEXT: s_lshr_b32 s10, s5, 24
; GFX7-NEXT: s_and_b32 s8, s5, s8
; GFX7-NEXT: v_mov_b32_e32 v4, s9
; GFX7-NEXT: s_lshr_b32 s7, s4, 24
; GFX7-NEXT: v_mov_b32_e32 v2, s10
; GFX7-NEXT: s_bfe_u32 s5, s5, 0x80010
; GFX7-NEXT: v_mov_b32_e32 v3, s8
; GFX7-NEXT: v_mul_u32_u24_e32 v2, s7, v2
; GFX7-NEXT: v_mul_u32_u24_e32 v4, s6, v4
; GFX7-NEXT: s_bfe_u32 s4, s4, 0x80010
; GFX7-NEXT: s_lshr_b32 s6, s4, 24
; GFX7-NEXT: s_bfe_u32 s7, s4, 0x80008
; GFX7-NEXT: s_bfe_u32 s10, s5, 0x80008
; GFX7-NEXT: s_bfe_u32 s12, s5, 0x80010
; GFX7-NEXT: s_lshr_b32 s9, s5, 24
; GFX7-NEXT: s_and_b32 s5, s5, s8
; GFX7-NEXT: s_bfe_u32 s11, s4, 0x80010
; GFX7-NEXT: s_and_b32 s4, s4, s8
; GFX7-NEXT: v_mov_b32_e32 v1, s5
; GFX7-NEXT: v_mul_u32_u24_e32 v1, s4, v1
; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX7-NEXT: v_mul_u32_u24_e32 v3, s11, v3
; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
; GFX7-NEXT: v_or_b32_e32 v2, v3, v4
; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v1
; GFX7-NEXT: v_mov_b32_e32 v2, s10
; GFX7-NEXT: v_mov_b32_e32 v3, s12
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v3, v0
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v4, v0
; GFX7-NEXT: v_mad_u32_u24 v0, s4, v1, v0
; GFX7-NEXT: v_mad_u32_u24 v0, s7, v2, v0
; GFX7-NEXT: v_mad_u32_u24 v0, s11, v3, v0
; GFX7-NEXT: v_mov_b32_e32 v1, s9
; GFX7-NEXT: v_mad_u32_u24 v0, s6, v1, v0
; GFX7-NEXT: buffer_store_short v0, off, s[0:3], 0
; GFX7-NEXT: s_endpgm
;
@ -2023,23 +2013,23 @@ define amdgpu_kernel void @udot4_acc8_vecMul(<4 x i8> addrspace(1)* %src1,
; GFX7-NEXT: v_mul_u32_u24_e32 v1, s9, v1
; GFX7-NEXT: v_mul_u32_u24_e32 v2, s7, v2
; GFX7-NEXT: v_mul_u32_u24_e32 v3, s6, v3
; GFX7-NEXT: s_and_b32 s4, s4, s8
; GFX7-NEXT: s_and_b32 s5, s4, s8
; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX7-NEXT: v_and_b32_e32 v2, s8, v2
; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3
; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
; GFX7-NEXT: v_or_b32_e32 v2, s4, v3
; GFX7-NEXT: v_or_b32_e32 v2, s5, v3
; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX7-NEXT: v_or_b32_e32 v1, v2, v1
; GFX7-NEXT: v_lshrrev_b32_e32 v2, 8, v1
; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v1
; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v1
; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_add_i32_e32 v0, vcc, s4, v0
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v3
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v2, v0
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v3, v0
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v4, v0
; GFX7-NEXT: buffer_store_byte v0, off, s[0:3], 0
; GFX7-NEXT: s_endpgm
;
@ -2055,31 +2045,32 @@ define amdgpu_kernel void @udot4_acc8_vecMul(<4 x i8> addrspace(1)* %src1,
; GFX8-NEXT: s_load_dword s0, s[4:5], 0x0
; GFX8-NEXT: s_load_dword s1, s[6:7], 0x0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v3, s0
; GFX8-NEXT: v_mov_b32_e32 v4, s1
; GFX8-NEXT: s_and_b32 s7, s1, s8
; GFX8-NEXT: s_lshr_b32 s2, s0, 24
; GFX8-NEXT: s_lshr_b32 s3, s1, 24
; GFX8-NEXT: s_bfe_u32 s6, s1, 0x80010
; GFX8-NEXT: s_and_b32 s7, s1, s8
; GFX8-NEXT: v_mov_b32_e32 v3, s0
; GFX8-NEXT: v_mov_b32_e32 v4, s1
; GFX8-NEXT: v_mul_u32_u24_sdwa v3, v3, v4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
; GFX8-NEXT: s_bfe_u32 s4, s0, 0x80010
; GFX8-NEXT: v_mov_b32_e32 v5, s6
; GFX8-NEXT: s_and_b32 s5, s0, s8
; GFX8-NEXT: v_mov_b32_e32 v4, s7
; GFX8-NEXT: v_mul_u32_u24_e32 v4, s5, v4
; GFX8-NEXT: s_bfe_u32 s4, s0, 0x80010
; GFX8-NEXT: v_mov_b32_e32 v5, s6
; GFX8-NEXT: v_mov_b32_e32 v6, s3
; GFX8-NEXT: v_mov_b32_e32 v7, s2
; GFX8-NEXT: v_mul_u32_u24_e32 v4, s5, v4
; GFX8-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX8-NEXT: v_mul_u32_u24_e32 v5, s4, v5
; GFX8-NEXT: v_mul_u32_u24_sdwa v6, v7, v6 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX8-NEXT: v_or_b32_sdwa v5, v5, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_sdwa v3, v3, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX8-NEXT: v_lshrrev_b32_e32 v4, 8, v3
; GFX8-NEXT: v_or_b32_e32 v4, v3, v5
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v4
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v4, v2
; GFX8-NEXT: v_add_u32_sdwa v2, vcc, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT: v_add_u32_sdwa v2, vcc, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v5
; GFX8-NEXT: v_add_u32_sdwa v2, vcc, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_add_u32_sdwa v2, vcc, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
; GFX8-NEXT: flat_store_byte v[0:1], v2
; GFX8-NEXT: s_endpgm
;
@ -2101,20 +2092,21 @@ define amdgpu_kernel void @udot4_acc8_vecMul(<4 x i8> addrspace(1)* %src1,
; GFX9-NODL-NEXT: s_lshr_b32 s4, s3, 24
; GFX9-NODL-NEXT: v_mul_lo_u16_e32 v3, s2, v3
; GFX9-NODL-NEXT: v_mul_lo_u16_sdwa v4, s2, v4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
; GFX9-NODL-NEXT: v_mov_b32_e32 v5, s1
; GFX9-NODL-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NODL-NEXT: v_mov_b32_e32 v5, s1
; GFX9-NODL-NEXT: s_lshr_b32 s5, s2, 24
; GFX9-NODL-NEXT: v_mov_b32_e32 v4, s4
; GFX9-NODL-NEXT: v_mul_lo_u16_sdwa v4, s5, v4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NODL-NEXT: v_mul_lo_u16_e32 v5, s0, v5
; GFX9-NODL-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX9-NODL-NEXT: v_or_b32_sdwa v4, v5, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NODL-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v4, 8, v3
; GFX9-NODL-NEXT: v_or_b32_e32 v4, v3, v4
; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v5, 8, v4
; GFX9-NODL-NEXT: s_waitcnt vmcnt(0)
; GFX9-NODL-NEXT: v_add_u32_e32 v2, v3, v2
; GFX9-NODL-NEXT: v_add_u32_e32 v2, v2, v4
; GFX9-NODL-NEXT: v_add_u32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NODL-NEXT: v_add_u32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
; GFX9-NODL-NEXT: v_add_u32_e32 v2, v2, v5
; GFX9-NODL-NEXT: v_add_u32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NODL-NEXT: v_add_u32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
; GFX9-NODL-NEXT: global_store_byte v[0:1], v2, off
; GFX9-NODL-NEXT: s_endpgm
;
@ -2136,20 +2128,21 @@ define amdgpu_kernel void @udot4_acc8_vecMul(<4 x i8> addrspace(1)* %src1,
; GFX9-DL-NEXT: s_lshr_b32 s4, s3, 24
; GFX9-DL-NEXT: v_mul_lo_u16_e32 v3, s2, v3
; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v4, s2, v4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
; GFX9-DL-NEXT: v_mov_b32_e32 v5, s1
; GFX9-DL-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-DL-NEXT: v_mov_b32_e32 v5, s1
; GFX9-DL-NEXT: s_lshr_b32 s5, s2, 24
; GFX9-DL-NEXT: v_mov_b32_e32 v4, s4
; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v4, s5, v4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-DL-NEXT: v_mul_lo_u16_e32 v5, s0, v5
; GFX9-DL-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX9-DL-NEXT: v_or_b32_sdwa v4, v5, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-DL-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-DL-NEXT: v_lshrrev_b32_e32 v4, 8, v3
; GFX9-DL-NEXT: v_or_b32_e32 v4, v3, v4
; GFX9-DL-NEXT: v_lshrrev_b32_e32 v5, 8, v4
; GFX9-DL-NEXT: s_waitcnt vmcnt(0)
; GFX9-DL-NEXT: v_add_u32_e32 v2, v3, v2
; GFX9-DL-NEXT: v_add_u32_e32 v2, v2, v4
; GFX9-DL-NEXT: v_add_u32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-DL-NEXT: v_add_u32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
; GFX9-DL-NEXT: v_add_u32_e32 v2, v2, v5
; GFX9-DL-NEXT: v_add_u32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-DL-NEXT: v_add_u32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
; GFX9-DL-NEXT: global_store_byte v[0:1], v2, off
; GFX9-DL-NEXT: s_endpgm
;
@ -2167,27 +2160,28 @@ define amdgpu_kernel void @udot4_acc8_vecMul(<4 x i8> addrspace(1)* %src1,
; GFX10-DL-NEXT: v_mov_b32_e32 v1, s1
; GFX10-DL-NEXT: global_load_ubyte v3, v[0:1], off
; GFX10-DL-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-DL-NEXT: s_lshr_b32 s0, s3, 24
; GFX10-DL-NEXT: s_lshr_b32 s5, s4, 24
; GFX10-DL-NEXT: s_lshr_b32 s1, s3, 16
; GFX10-DL-NEXT: s_lshr_b32 s6, s4, 16
; GFX10-DL-NEXT: v_and_b32_sdwa v4, s3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX10-DL-NEXT: v_and_b32_sdwa v5, s4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX10-DL-NEXT: s_lshr_b32 s0, s3, 24
; GFX10-DL-NEXT: s_lshr_b32 s1, s3, 16
; GFX10-DL-NEXT: v_mul_lo_u16_e64 v6, s3, s4
; GFX10-DL-NEXT: v_mul_lo_u16_e64 v7, s0, s5
; GFX10-DL-NEXT: v_mul_lo_u16_e64 v8, s1, s6
; GFX10-DL-NEXT: s_lshr_b32 s3, s4, 16
; GFX10-DL-NEXT: v_mul_lo_u16_e64 v4, v4, v5
; GFX10-DL-NEXT: s_lshr_b32 s4, s4, 24
; GFX10-DL-NEXT: v_and_b32_sdwa v5, v6, s2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX10-DL-NEXT: v_and_b32_sdwa v6, v7, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX10-DL-NEXT: v_and_b32_sdwa v7, v8, s2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX10-DL-NEXT: v_and_b32_sdwa v2, v4, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX10-DL-NEXT: v_or_b32_sdwa v4, v7, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; GFX10-DL-NEXT: v_or_b32_sdwa v2, v5, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; GFX10-DL-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX10-DL-NEXT: v_lshrrev_b32_e32 v4, 8, v2
; GFX10-DL-NEXT: v_mul_lo_u16_e64 v6, s1, s3
; GFX10-DL-NEXT: v_and_b32_sdwa v4, v4, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX10-DL-NEXT: v_mul_lo_u16_e64 v7, s0, s4
; GFX10-DL-NEXT: v_or_b32_sdwa v4, v5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; GFX10-DL-NEXT: v_and_b32_sdwa v5, v6, s2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX10-DL-NEXT: v_and_b32_sdwa v2, v7, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX10-DL-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX10-DL-NEXT: v_or_b32_sdwa v2, v5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; GFX10-DL-NEXT: v_or_b32_e32 v2, v4, v2
; GFX10-DL-NEXT: v_lshrrev_b32_e32 v5, 8, v2
; GFX10-DL-NEXT: s_waitcnt vmcnt(0)
; GFX10-DL-NEXT: v_add_nc_u32_e32 v3, v2, v3
; GFX10-DL-NEXT: v_add_nc_u32_e32 v3, v3, v4
; GFX10-DL-NEXT: v_add_nc_u32_e32 v3, v4, v3
; GFX10-DL-NEXT: v_add_nc_u32_e32 v3, v3, v5
; GFX10-DL-NEXT: v_add_nc_u32_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX10-DL-NEXT: v_add_nc_u32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
; GFX10-DL-NEXT: global_store_byte v[0:1], v2, off

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1931,19 +1931,19 @@ define amdgpu_kernel void @v_sdiv_i24(i32 addrspace(1)* %out, i24 addrspace(1)*
; GCN-NEXT: s_waitcnt vmcnt(1)
; GCN-NEXT: v_or_b32_e32 v0, v0, v1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_or_b32_e32 v1, v2, v3
; GCN-NEXT: v_xor_b32_e32 v2, v0, v1
; GCN-NEXT: v_or_b32_e32 v2, v2, v3
; GCN-NEXT: v_xor_b32_e32 v1, v1, v3
; GCN-NEXT: v_ashrrev_i32_e32 v1, 30, v1
; GCN-NEXT: v_cvt_f32_i32_e32 v0, v0
; GCN-NEXT: v_cvt_f32_i32_e32 v1, v1
; GCN-NEXT: v_ashrrev_i32_e32 v2, 30, v2
; GCN-NEXT: v_rcp_iflag_f32_e32 v3, v1
; GCN-NEXT: v_or_b32_e32 v2, 1, v2
; GCN-NEXT: v_cvt_f32_i32_e32 v2, v2
; GCN-NEXT: v_or_b32_e32 v1, 1, v1
; GCN-NEXT: v_rcp_iflag_f32_e32 v3, v2
; GCN-NEXT: v_mul_f32_e32 v3, v0, v3
; GCN-NEXT: v_trunc_f32_e32 v3, v3
; GCN-NEXT: v_mad_f32 v0, -v3, v1, v0
; GCN-NEXT: v_mad_f32 v0, -v3, v2, v0
; GCN-NEXT: v_cvt_i32_f32_e32 v3, v3
; GCN-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, |v1|
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v2, vcc
; GCN-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, |v2|
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v3
; GCN-NEXT: v_bfe_i32 v0, v0, 0, 24
; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
@ -1970,21 +1970,21 @@ define amdgpu_kernel void @v_sdiv_i24(i32 addrspace(1)* %out, i24 addrspace(1)*
; TONGA-NEXT: s_waitcnt vmcnt(1)
; TONGA-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; TONGA-NEXT: v_or_b32_e32 v1, v1, v2
; TONGA-NEXT: v_cvt_f32_i32_e32 v2, v1
; TONGA-NEXT: v_cvt_f32_i32_e32 v1, v1
; TONGA-NEXT: s_waitcnt vmcnt(0)
; TONGA-NEXT: v_or_b32_e32 v0, v3, v0
; TONGA-NEXT: v_cvt_f32_i32_e32 v3, v0
; TONGA-NEXT: v_xor_b32_e32 v0, v0, v1
; TONGA-NEXT: v_rcp_iflag_f32_e32 v4, v2
; TONGA-NEXT: v_or_b32_e32 v3, v3, v0
; TONGA-NEXT: v_cvt_f32_i32_e32 v3, v3
; TONGA-NEXT: v_xor_b32_e32 v0, v0, v2
; TONGA-NEXT: v_rcp_iflag_f32_e32 v4, v1
; TONGA-NEXT: v_ashrrev_i32_e32 v0, 30, v0
; TONGA-NEXT: v_or_b32_e32 v0, 1, v0
; TONGA-NEXT: v_mul_f32_e32 v1, v3, v4
; TONGA-NEXT: v_trunc_f32_e32 v1, v1
; TONGA-NEXT: v_mad_f32 v3, -v1, v2, v3
; TONGA-NEXT: v_cvt_i32_f32_e32 v1, v1
; TONGA-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v2|
; TONGA-NEXT: v_mul_f32_e32 v2, v3, v4
; TONGA-NEXT: v_trunc_f32_e32 v2, v2
; TONGA-NEXT: v_mad_f32 v3, -v2, v1, v3
; TONGA-NEXT: v_cvt_i32_f32_e32 v2, v2
; TONGA-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v1|
; TONGA-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v1
; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v2
; TONGA-NEXT: v_bfe_i32 v0, v0, 0, 24
; TONGA-NEXT: buffer_store_dword v0, off, s[0:3], 0
; TONGA-NEXT: s_endpgm
@ -2011,18 +2011,18 @@ define amdgpu_kernel void @v_sdiv_i24(i32 addrspace(1)* %out, i24 addrspace(1)*
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX9-NEXT: v_or_b32_e32 v2, v2, v3
; GFX9-NEXT: v_cvt_f32_i32_e32 v3, v2
; GFX9-NEXT: v_cvt_f32_i32_e32 v1, v0
; GFX9-NEXT: v_xor_b32_e32 v0, v0, v2
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 30, v0
; GFX9-NEXT: v_rcp_iflag_f32_e32 v4, v3
; GFX9-NEXT: v_or_b32_e32 v0, 1, v0
; GFX9-NEXT: v_mul_f32_e32 v2, v1, v4
; GFX9-NEXT: v_trunc_f32_e32 v2, v2
; GFX9-NEXT: v_cvt_i32_f32_e32 v4, v2
; GFX9-NEXT: v_mad_f32 v1, -v2, v3, v1
; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, |v3|
; GFX9-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
; GFX9-NEXT: v_cvt_f32_i32_e32 v2, v2
; GFX9-NEXT: v_cvt_f32_i32_e32 v0, v0
; GFX9-NEXT: v_xor_b32_e32 v1, v1, v3
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 30, v1
; GFX9-NEXT: v_rcp_iflag_f32_e32 v4, v2
; GFX9-NEXT: v_or_b32_e32 v1, 1, v1
; GFX9-NEXT: v_mul_f32_e32 v3, v0, v4
; GFX9-NEXT: v_trunc_f32_e32 v3, v3
; GFX9-NEXT: v_cvt_i32_f32_e32 v4, v3
; GFX9-NEXT: v_mad_f32 v0, -v3, v2, v0
; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, |v2|
; GFX9-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GFX9-NEXT: v_add_u32_e32 v0, v4, v0
; GFX9-NEXT: v_bfe_i32 v0, v0, 0, 24
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0


@ -120,15 +120,15 @@ define void @fun2(<8 x i32> %src, <8 x i31>* %p)
define void @fun3(<3 x i31>* %src, <3 x i31>* %p)
; CHECK-LABEL: fun3:
; CHECK: # %bb.0:
; CHECK-NEXT: llgf %r1, 0(%r2)
; CHECK-NEXT: llgf %r0, 3(%r2)
; CHECK-NEXT: sllg %r4, %r1, 62
; CHECK-NEXT: llgf %r1, 6(%r2)
; CHECK-NEXT: llgf %r2, 0(%r2)
; CHECK-NEXT: rosbg %r1, %r0, 0, 32, 31
; CHECK-NEXT: sllg %r4, %r2, 62
; CHECK-NEXT: rosbg %r4, %r0, 0, 32, 31
; CHECK-NEXT: llgf %r0, 6(%r2)
; CHECK-NEXT: ogr %r0, %r4
; CHECK-NEXT: st %r0, 8(%r3)
; CHECK-NEXT: srlg %r0, %r4, 32
; CHECK-NEXT: sllg %r1, %r1, 30
; CHECK-NEXT: st %r1, 8(%r3)
; CHECK-NEXT: sllg %r1, %r2, 30
; CHECK-NEXT: lr %r1, %r0
; CHECK-NEXT: nihh %r1, 8191
; CHECK-NEXT: stg %r1, 0(%r3)


@ -8,14 +8,13 @@
define void @foo(i8 %arg4, i32 %arg5, i32* %arg14) nounwind {
; CHECK-LABEL: foo:
; CHECK: ## %bb.0: ## %bb
; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: andl $32, %edi
; CHECK-NEXT: orl $1601159181, %edi ## imm = 0x5F6FC00D
; CHECK-NEXT: andl %edi, %esi
; CHECK-NEXT: xorb $-14, %dil
; CHECK-NEXT: addb $82, %dil
; CHECK-NEXT: shrl $5, %esi
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: testb %sil, %sil
; CHECK-NEXT: leal 13(%rdi), %eax
; CHECK-NEXT: xorb $-14, %al
; CHECK-NEXT: addb $82, %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: testl %esi, %edi
; CHECK-NEXT: movl $1, %ecx
; CHECK-NEXT: cmovnel %eax, %ecx
; CHECK-NEXT: xorb $81, %cl


@ -498,7 +498,6 @@ define <4 x i32> @var_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %amt)
define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %amt) nounwind {
; SSE2-LABEL: var_funnnel_v8i16:
; SSE2: # %bb.0:
; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; SSE2-NEXT: psubw %xmm2, %xmm3
; SSE2-NEXT: psllw $12, %xmm3
@ -531,6 +530,7 @@ define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %amt)
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: psrlw $1, %xmm1
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
@ -768,7 +768,6 @@ define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %amt)
;
; X32-SSE-LABEL: var_funnnel_v8i16:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm2
; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; X32-SSE-NEXT: psubw %xmm2, %xmm3
; X32-SSE-NEXT: psllw $12, %xmm3
@ -801,6 +800,7 @@ define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %amt)
; X32-SSE-NEXT: pandn %xmm1, %xmm4
; X32-SSE-NEXT: psrlw $1, %xmm1
; X32-SSE-NEXT: pand %xmm3, %xmm1
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm2
; X32-SSE-NEXT: pxor %xmm3, %xmm3
; X32-SSE-NEXT: movdqa %xmm2, %xmm5
; X32-SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]


@ -1820,17 +1820,17 @@ define i8 @test_v16i8(<16 x i8> %a0) {
; AVX2-LABEL: test_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm1
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm2
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX2-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm2
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpackuswb %xmm0, %xmm2, %xmm2
; AVX2-NEXT: vpsrld $16, %xmm2, %xmm2
; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX2-NEXT: vpmullw %xmm2, %xmm0, %xmm0
@ -1840,6 +1840,7 @@ define i8 @test_v16i8(<16 x i8> %a0) {
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpextrb $0, %xmm0, %eax
; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: test_v16i8:


@ -1792,14 +1792,11 @@ define i8 @test_v16i8(<16 x i8> %a0) {
; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; AVX2-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
; AVX2-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[4],zero,xmm0[6],zero,xmm0[4],zero,xmm0[6],zero,xmm0[8],zero,xmm0[10],zero,xmm0[12],zero,xmm0[14],zero
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[1,1,2,3,4,5,6,7]
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpextrb $0, %xmm0, %eax