[X86][SSE] Add support for <64 x i1> bool reduction

This generalizes the existing <32 x i1> pre-AVX2 split code to support reductions from <64 x i1> as well; we can probably generalize to any larger pow2 case in the future if the (unlikely) need ever arises.

We still need to tweak combineBitcastvxi1 to improve AVX512F codegen, as it assumes vXi1 types should be handled on the mask registers even when they aren't legal.

Differential Revision: https://reviews.llvm.org/D67070

llvm-svn: 371328
Simon Pilgrim 2019-09-08 11:46:21 +00:00
parent acf81f4210
commit 3262084384
4 changed files with 179 additions and 436 deletions
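For reference, a minimal IR sketch (not part of the commit) of the kind of <64 x i1> bool reduction this combine now handles; the llvm.experimental.vector.reduce.and.v64i1 intrinsic name and the function name are assumptions here, mirroring the AND-reduction tests further down:

; Hypothetical reproducer: keep the low bit of each byte, then AND-reduce all
; 64 bits down to a single i1 (an "all_of" style check).
declare i1 @llvm.experimental.vector.reduce.and.v64i1(<64 x i1>)

define i1 @all_of_low_bits_v64i8(<64 x i8> %x) {
  %m = trunc <64 x i8> %x to <64 x i1>
  %r = call i1 @llvm.experimental.vector.reduce.and.v64i1(<64 x i1> %m)
  ret i1 %r
}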


@@ -35859,11 +35859,12 @@ static SDValue combineHorizontalPredicateResult(SDNode *Extract,
SDLoc DL(Extract);
EVT MatchVT = Match.getValueType();
unsigned NumElts = MatchVT.getVectorNumElements();
unsigned MaxElts = Subtarget.hasInt256() ? 32 : 16;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (ExtractVT == MVT::i1) {
// Special case for (pre-legalization) vXi1 reductions.
if (NumElts > 32)
if (NumElts > 64 || !isPowerOf2_32(NumElts))
return SDValue();
if (TLI.isTypeLegal(MatchVT)) {
// If this is a legal AVX512 predicate type then we can just bitcast.
@@ -35871,18 +35872,18 @@ static SDValue combineHorizontalPredicateResult(SDNode *Extract,
Movmsk = DAG.getBitcast(MovmskVT, Match);
} else {
// Use combineBitcastvxi1 to create the MOVMSK.
if (NumElts == 32 && !Subtarget.hasInt256()) {
while (NumElts > MaxElts) {
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
NumElts = 16;
NumElts /= 2;
}
EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
}
if (!Movmsk)
return SDValue();
Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, MVT::i32);
Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, NumElts > 32 ? MVT::i64 : MVT::i32);
} else {
// Bail with AVX512VL (which uses predicate registers).
if (Subtarget.hasVLX())
@@ -35923,13 +35924,15 @@ static SDValue combineHorizontalPredicateResult(SDNode *Extract,
Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
NumElts = MaskSrcVT.getVectorNumElements();
}
assert(NumElts <= 32 && "Not expecting more than 32 elements");
assert((NumElts <= 32 || NumElts == 64) &&
"Not expecting more than 64 elements");
MVT CmpVT = NumElts == 64 ? MVT::i64 : MVT::i32;
if (BinOp == ISD::XOR) {
// parity -> (AND (CTPOP(MOVMSK X)), 1)
SDValue Mask = DAG.getConstant(1, DL, MVT::i32);
SDValue Result = DAG.getNode(ISD::CTPOP, DL, MVT::i32, Movmsk);
Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result, Mask);
SDValue Mask = DAG.getConstant(1, DL, CmpVT);
SDValue Result = DAG.getNode(ISD::CTPOP, DL, CmpVT, Movmsk);
Result = DAG.getNode(ISD::AND, DL, CmpVT, Result, Mask);
return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
}
@@ -35937,18 +35940,18 @@ static SDValue combineHorizontalPredicateResult(SDNode *Extract,
ISD::CondCode CondCode;
if (BinOp == ISD::OR) {
// any_of -> MOVMSK != 0
CmpC = DAG.getConstant(0, DL, MVT::i32);
CmpC = DAG.getConstant(0, DL, CmpVT);
CondCode = ISD::CondCode::SETNE;
} else {
// all_of -> MOVMSK == ((1 << NumElts) - 1)
CmpC = DAG.getConstant((1ULL << NumElts) - 1, DL, MVT::i32);
CmpC = DAG.getConstant((1ULL << NumElts) - 1, DL, CmpVT);
CondCode = ISD::CondCode::SETEQ;
}
// The setcc produces an i8 of 0/1, so extend that to the result width and
// negate to get the final 0/-1 mask value.
EVT SetccVT =
TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT);
SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
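The ISD::XOR branch above lowers a parity reduction as (AND (CTPOP (MOVMSK X)), 1), now computed in i64 when there are 64 elements. A hedged IR sketch of the pattern that reaches it (intrinsic and function names assumed, mirroring the XOR/parity tests in the last file below):

; Hypothetical reproducer for the parity path: XOR-reduce the low bits of all
; 64 bytes; per the checks below this now lowers via pmovmskb plus a scalar
; parity check rather than a shuffle-based reduction tree.
declare i1 @llvm.experimental.vector.reduce.xor.v64i1(<64 x i1>)

define i1 @parity_of_low_bits_v64i8(<64 x i8> %x) {
  %m = trunc <64 x i8> %x to <64 x i1>
  %r = call i1 @llvm.experimental.vector.reduce.xor.v64i1(<64 x i1> %m)
  ret i1 %r
}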


@@ -804,77 +804,36 @@ define i1 @trunc_v32i16_v32i1(<32 x i16>) {
}
define i1 @trunc_v64i8_v64i1(<64 x i8>) {
; SSE2-LABEL: trunc_v64i8_v64i1:
; SSE2: # %bb.0:
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: trunc_v64i8_v64i1:
; SSE41: # %bb.0:
; SSE41-NEXT: pand %xmm3, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
; SSE41-NEXT: pand %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT: pand %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE41-NEXT: pand %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: psrld $16, %xmm0
; SSE41-NEXT: pand %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: pand %xmm0, %xmm1
; SSE41-NEXT: pextrb $0, %xmm1, %eax
; SSE41-NEXT: # kill: def $al killed $al killed $eax
; SSE41-NEXT: retq
; SSE-LABEL: trunc_v64i8_v64i1:
; SSE: # %bb.0:
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: psllw $7, %xmm1
; SSE-NEXT: pmovmskb %xmm1, %eax
; SSE-NEXT: cmpw $-1, %ax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_v64i8_v64i1:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: cmpw $-1, %ax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_v64i8_v64i1:
; AVX2: # %bb.0:
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpextrb $0, %xmm0, %eax
; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: cmpl $-1, %eax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -906,20 +865,8 @@ define i1 @trunc_v64i8_v64i1(<64 x i8>) {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $7, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
; AVX512BW-NEXT: kshiftrq $32, %k0, %k1
; AVX512BW-NEXT: kandq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $16, %k0, %k1
; AVX512BW-NEXT: kandq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $8, %k0, %k1
; AVX512BW-NEXT: kandq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $4, %k0, %k1
; AVX512BW-NEXT: kandq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $2, %k0, %k1
; AVX512BW-NEXT: kandq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $1, %k0, %k1
; AVX512BW-NEXT: kandq %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: kortestq %k0, %k0
; AVX512BW-NEXT: sete %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -927,20 +874,8 @@ define i1 @trunc_v64i8_v64i1(<64 x i8>) {
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllw $7, %zmm0, %zmm0
; AVX512VL-NEXT: vpmovb2m %zmm0, %k0
; AVX512VL-NEXT: kshiftrq $32, %k0, %k1
; AVX512VL-NEXT: kandq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $16, %k0, %k1
; AVX512VL-NEXT: kandq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $8, %k0, %k1
; AVX512VL-NEXT: kandq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $4, %k0, %k1
; AVX512VL-NEXT: kandq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $2, %k0, %k1
; AVX512VL-NEXT: kandq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $1, %k0, %k1
; AVX512VL-NEXT: kandq %k1, %k0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: kortestq %k0, %k0
; AVX512VL-NEXT: sete %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
%a = trunc <64 x i8> %0 to <64 x i1>
@@ -1749,9 +1684,8 @@ define i1 @icmp_v64i8_v64i1(<64 x i8>) {
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: pcmpeqb %xmm3, %xmm1
; SSE-NEXT: pmovmskb %xmm1, %eax
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: cmpw $-1, %ax
; SSE-NEXT: sete %al
; SSE-NEXT: negb %al
; SSE-NEXT: retq
;
; AVX1-LABEL: icmp_v64i8_v64i1:
@@ -1762,10 +1696,10 @@ define i1 @icmp_v64i8_v64i1(<64 x i8>) {
; AVX1-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; AVX1-NEXT: cmpw $-1, %ax
; AVX1-NEXT: sete %al
; AVX1-NEXT: negb %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1777,7 +1711,6 @@ define i1 @icmp_v64i8_v64i1(<64 x i8>) {
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: cmpl $-1, %eax
; AVX2-NEXT: sete %al
; AVX2-NEXT: negb %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1810,40 +1743,16 @@ define i1 @icmp_v64i8_v64i1(<64 x i8>) {
; AVX512BW-LABEL: icmp_v64i8_v64i1:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vptestnmb %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kshiftrq $32, %k0, %k1
; AVX512BW-NEXT: kandq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $16, %k0, %k1
; AVX512BW-NEXT: kandq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $8, %k0, %k1
; AVX512BW-NEXT: kandq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $4, %k0, %k1
; AVX512BW-NEXT: kandq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $2, %k0, %k1
; AVX512BW-NEXT: kandq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $1, %k0, %k1
; AVX512BW-NEXT: kandq %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: kortestq %k0, %k0
; AVX512BW-NEXT: sete %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: icmp_v64i8_v64i1:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vptestnmb %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kshiftrq $32, %k0, %k1
; AVX512VL-NEXT: kandq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $16, %k0, %k1
; AVX512VL-NEXT: kandq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $8, %k0, %k1
; AVX512VL-NEXT: kandq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $4, %k0, %k1
; AVX512VL-NEXT: kandq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $2, %k0, %k1
; AVX512VL-NEXT: kandq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $1, %k0, %k1
; AVX512VL-NEXT: kandq %k1, %k0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: kortestq %k0, %k0
; AVX512VL-NEXT: sete %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
%a = icmp eq <64 x i8> %0, zeroinitializer


@@ -795,77 +795,36 @@ define i1 @trunc_v32i16_v32i1(<32 x i16>) {
}
define i1 @trunc_v64i8_v64i1(<64 x i8>) {
; SSE2-LABEL: trunc_v64i8_v64i1:
; SSE2: # %bb.0:
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT: por %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: trunc_v64i8_v64i1:
; SSE41: # %bb.0:
; SSE41-NEXT: por %xmm3, %xmm1
; SSE41-NEXT: por %xmm2, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: psrld $16, %xmm0
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: por %xmm0, %xmm1
; SSE41-NEXT: pextrb $0, %xmm1, %eax
; SSE41-NEXT: # kill: def $al killed $al killed $eax
; SSE41-NEXT: retq
; SSE-LABEL: trunc_v64i8_v64i1:
; SSE: # %bb.0:
; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: psllw $7, %xmm1
; SSE-NEXT: pmovmskb %xmm1, %eax
; SSE-NEXT: testw %ax, %ax
; SSE-NEXT: setne %al
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_v64i8_v64i1:
; AVX1: # %bb.0:
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: testw %ax, %ax
; AVX1-NEXT: setne %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_v64i8_v64i1:
; AVX2: # %bb.0:
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpextrb $0, %xmm0, %eax
; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: testl %eax, %eax
; AVX2-NEXT: setne %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -897,20 +856,8 @@ define i1 @trunc_v64i8_v64i1(<64 x i8>) {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $7, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
; AVX512BW-NEXT: kshiftrq $32, %k0, %k1
; AVX512BW-NEXT: korq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $16, %k0, %k1
; AVX512BW-NEXT: korq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $8, %k0, %k1
; AVX512BW-NEXT: korq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $4, %k0, %k1
; AVX512BW-NEXT: korq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $2, %k0, %k1
; AVX512BW-NEXT: korq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $1, %k0, %k1
; AVX512BW-NEXT: korq %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: kortestq %k0, %k0
; AVX512BW-NEXT: setne %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -918,20 +865,8 @@ define i1 @trunc_v64i8_v64i1(<64 x i8>) {
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllw $7, %zmm0, %zmm0
; AVX512VL-NEXT: vpmovb2m %zmm0, %k0
; AVX512VL-NEXT: kshiftrq $32, %k0, %k1
; AVX512VL-NEXT: korq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $16, %k0, %k1
; AVX512VL-NEXT: korq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $8, %k0, %k1
; AVX512VL-NEXT: korq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $4, %k0, %k1
; AVX512VL-NEXT: korq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $2, %k0, %k1
; AVX512VL-NEXT: korq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $1, %k0, %k1
; AVX512VL-NEXT: korq %k1, %k0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: kortestq %k0, %k0
; AVX512VL-NEXT: setne %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
%a = trunc <64 x i8> %0 to <64 x i1>
@@ -1735,9 +1670,10 @@ define i1 @icmp_v64i8_v64i1(<64 x i8>) {
; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: psllw $7, %xmm1
; SSE-NEXT: pmovmskb %xmm1, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbb %al, %al
; SSE-NEXT: testw %ax, %ax
; SSE-NEXT: setne %al
; SSE-NEXT: retq
;
; AVX1-LABEL: icmp_v64i8_v64i1:
@@ -1752,9 +1688,10 @@ define i1 @icmp_v64i8_v64i1(<64 x i8>) {
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: negl %eax
; AVX1-NEXT: sbbb %al, %al
; AVX1-NEXT: testw %ax, %ax
; AVX1-NEXT: setne %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1765,8 +1702,8 @@ define i1 @icmp_v64i8_v64i1(<64 x i8>) {
; AVX2-NEXT: vpcmpeqb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: negl %eax
; AVX2-NEXT: sbbb %al, %al
; AVX2-NEXT: testl %eax, %eax
; AVX2-NEXT: setne %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1799,40 +1736,16 @@ define i1 @icmp_v64i8_v64i1(<64 x i8>) {
; AVX512BW-LABEL: icmp_v64i8_v64i1:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vptestnmb %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kshiftrq $32, %k0, %k1
; AVX512BW-NEXT: korq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $16, %k0, %k1
; AVX512BW-NEXT: korq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $8, %k0, %k1
; AVX512BW-NEXT: korq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $4, %k0, %k1
; AVX512BW-NEXT: korq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $2, %k0, %k1
; AVX512BW-NEXT: korq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $1, %k0, %k1
; AVX512BW-NEXT: korq %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: kortestq %k0, %k0
; AVX512BW-NEXT: setne %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: icmp_v64i8_v64i1:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vptestnmb %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kshiftrq $32, %k0, %k1
; AVX512VL-NEXT: korq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $16, %k0, %k1
; AVX512VL-NEXT: korq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $8, %k0, %k1
; AVX512VL-NEXT: korq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $4, %k0, %k1
; AVX512VL-NEXT: korq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $2, %k0, %k1
; AVX512VL-NEXT: korq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $1, %k0, %k1
; AVX512VL-NEXT: korq %k1, %k0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: kortestq %k0, %k0
; AVX512VL-NEXT: setne %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
%a = icmp eq <64 x i8> %0, zeroinitializer
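The diff view truncates the IR body of icmp_v64i8_v64i1 above; a hedged sketch of the kind of OR-based any_of reduction this file exercises (intrinsic and function names assumed):

; Hypothetical example: compare all 64 bytes against zero and OR-reduce the
; result (an "any_of" style check), which per the checks above now lowers to
; a pmovmskb-based test (testw/testl + setne) instead of a shuffle tree.
declare i1 @llvm.experimental.vector.reduce.or.v64i1(<64 x i1>)

define i1 @any_of_zero_bytes_v64i8(<64 x i8> %x) {
  %a = icmp eq <64 x i8> %x, zeroinitializer
  %b = call i1 @llvm.experimental.vector.reduce.or.v64i1(<64 x i1> %a)
  ret i1 %b
}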


@@ -904,77 +904,45 @@ define i1 @trunc_v32i16_v32i1(<32 x i16>) {
}
define i1 @trunc_v64i8_v64i1(<64 x i8>) {
; SSE2-LABEL: trunc_v64i8_v64i1:
; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm3, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT: pxor %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT: pxor %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: trunc_v64i8_v64i1:
; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm3, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm1
; SSE41-NEXT: pxor %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE41-NEXT: pxor %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: psrld $16, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: pxor %xmm0, %xmm1
; SSE41-NEXT: pextrb $0, %xmm1, %eax
; SSE41-NEXT: # kill: def $al killed $al killed $eax
; SSE41-NEXT: retq
; SSE-LABEL: trunc_v64i8_v64i1:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm3, %xmm1
; SSE-NEXT: pxor %xmm2, %xmm1
; SSE-NEXT: pxor %xmm0, %xmm1
; SSE-NEXT: psllw $7, %xmm1
; SSE-NEXT: pmovmskb %xmm1, %eax
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrl $8, %ecx
; SSE-NEXT: xorb %al, %cl
; SSE-NEXT: setnp %al
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc_v64i8_v64i1:
; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: shrl $8, %ecx
; AVX1-NEXT: xorb %al, %cl
; AVX1-NEXT: setnp %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_v64i8_v64i1:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpextrb $0, %xmm0, %eax
; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: shrl $16, %ecx
; AVX2-NEXT: xorl %eax, %ecx
; AVX2-NEXT: movl %ecx, %eax
; AVX2-NEXT: shrl $8, %eax
; AVX2-NEXT: xorb %cl, %al
; AVX2-NEXT: setnp %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1006,20 +974,17 @@ define i1 @trunc_v64i8_v64i1(<64 x i8>) {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $7, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
; AVX512BW-NEXT: kshiftrq $32, %k0, %k1
; AVX512BW-NEXT: kxorq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $16, %k0, %k1
; AVX512BW-NEXT: kxorq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $8, %k0, %k1
; AVX512BW-NEXT: kxorq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $4, %k0, %k1
; AVX512BW-NEXT: kxorq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $2, %k0, %k1
; AVX512BW-NEXT: kxorq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $1, %k0, %k1
; AVX512BW-NEXT: kxorq %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: kmovq %k0, %rax
; AVX512BW-NEXT: movq %rax, %rcx
; AVX512BW-NEXT: shrq $32, %rcx
; AVX512BW-NEXT: xorl %eax, %ecx
; AVX512BW-NEXT: movl %ecx, %eax
; AVX512BW-NEXT: shrl $16, %eax
; AVX512BW-NEXT: xorl %ecx, %eax
; AVX512BW-NEXT: movl %eax, %ecx
; AVX512BW-NEXT: shrl $8, %ecx
; AVX512BW-NEXT: xorb %al, %cl
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1027,20 +992,17 @@ define i1 @trunc_v64i8_v64i1(<64 x i8>) {
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllw $7, %zmm0, %zmm0
; AVX512VL-NEXT: vpmovb2m %zmm0, %k0
; AVX512VL-NEXT: kshiftrq $32, %k0, %k1
; AVX512VL-NEXT: kxorq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $16, %k0, %k1
; AVX512VL-NEXT: kxorq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $8, %k0, %k1
; AVX512VL-NEXT: kxorq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $4, %k0, %k1
; AVX512VL-NEXT: kxorq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $2, %k0, %k1
; AVX512VL-NEXT: kxorq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $1, %k0, %k1
; AVX512VL-NEXT: kxorq %k1, %k0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: kmovq %k0, %rax
; AVX512VL-NEXT: movq %rax, %rcx
; AVX512VL-NEXT: shrq $32, %rcx
; AVX512VL-NEXT: xorl %eax, %ecx
; AVX512VL-NEXT: movl %ecx, %eax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: xorl %ecx, %eax
; AVX512VL-NEXT: movl %eax, %ecx
; AVX512VL-NEXT: shrl $8, %ecx
; AVX512VL-NEXT: xorb %al, %cl
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
%a = trunc <64 x i8> %0 to <64 x i1>
@@ -1943,53 +1905,23 @@ define i1 @icmp_v32i16_v32i1(<32 x i16>) {
}
define i1 @icmp_v64i8_v64i1(<64 x i8>) {
; SSE2-LABEL: icmp_v64i8_v64i1:
; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pcmpeqb %xmm4, %xmm2
; SSE2-NEXT: pcmpeqb %xmm4, %xmm0
; SSE2-NEXT: pcmpeqb %xmm4, %xmm3
; SSE2-NEXT: pcmpeqb %xmm4, %xmm1
; SSE2-NEXT: pxor %xmm3, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT: pxor %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT: pxor %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: icmp_v64i8_v64i1:
; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm4, %xmm4
; SSE41-NEXT: pcmpeqb %xmm4, %xmm2
; SSE41-NEXT: pcmpeqb %xmm4, %xmm0
; SSE41-NEXT: pcmpeqb %xmm4, %xmm3
; SSE41-NEXT: pcmpeqb %xmm4, %xmm1
; SSE41-NEXT: pxor %xmm3, %xmm1
; SSE41-NEXT: pxor %xmm2, %xmm1
; SSE41-NEXT: pxor %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE41-NEXT: pxor %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: psrld $16, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: pxor %xmm0, %xmm1
; SSE41-NEXT: pextrb $0, %xmm1, %eax
; SSE41-NEXT: # kill: def $al killed $al killed $eax
; SSE41-NEXT: retq
; SSE-LABEL: icmp_v64i8_v64i1:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pcmpeqb %xmm4, %xmm2
; SSE-NEXT: pcmpeqb %xmm4, %xmm0
; SSE-NEXT: pcmpeqb %xmm4, %xmm3
; SSE-NEXT: pcmpeqb %xmm4, %xmm1
; SSE-NEXT: pxor %xmm3, %xmm1
; SSE-NEXT: pxor %xmm2, %xmm1
; SSE-NEXT: pxor %xmm0, %xmm1
; SSE-NEXT: psllw $7, %xmm1
; SSE-NEXT: pmovmskb %xmm1, %eax
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrl $8, %ecx
; SSE-NEXT: xorb %al, %cl
; SSE-NEXT: setnp %al
; SSE-NEXT: retq
;
; AVX1-LABEL: icmp_v64i8_v64i1:
; AVX1: # %bb.0:
@@ -2003,16 +1935,12 @@ define i1 @icmp_v64i8_v64i1(<64 x i8>) {
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vpxor %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: shrl $8, %ecx
; AVX1-NEXT: xorb %al, %cl
; AVX1-NEXT: setnp %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -2022,18 +1950,14 @@ define i1 @icmp_v64i8_v64i1(<64 x i8>) {
; AVX2-NEXT: vpcmpeqb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpcmpeqb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpextrb $0, %xmm0, %eax
; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: shrl $16, %ecx
; AVX2-NEXT: xorl %eax, %ecx
; AVX2-NEXT: movl %ecx, %eax
; AVX2-NEXT: shrl $8, %eax
; AVX2-NEXT: xorb %cl, %al
; AVX2-NEXT: setnp %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2066,40 +1990,34 @@ define i1 @icmp_v64i8_v64i1(<64 x i8>) {
; AVX512BW-LABEL: icmp_v64i8_v64i1:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vptestnmb %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kshiftrq $32, %k0, %k1
; AVX512BW-NEXT: kxorq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $16, %k0, %k1
; AVX512BW-NEXT: kxorq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $8, %k0, %k1
; AVX512BW-NEXT: kxorq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $4, %k0, %k1
; AVX512BW-NEXT: kxorq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $2, %k0, %k1
; AVX512BW-NEXT: kxorq %k1, %k0, %k0
; AVX512BW-NEXT: kshiftrq $1, %k0, %k1
; AVX512BW-NEXT: kxorq %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
; AVX512BW-NEXT: kmovq %k0, %rax
; AVX512BW-NEXT: movq %rax, %rcx
; AVX512BW-NEXT: shrq $32, %rcx
; AVX512BW-NEXT: xorl %eax, %ecx
; AVX512BW-NEXT: movl %ecx, %eax
; AVX512BW-NEXT: shrl $16, %eax
; AVX512BW-NEXT: xorl %ecx, %eax
; AVX512BW-NEXT: movl %eax, %ecx
; AVX512BW-NEXT: shrl $8, %ecx
; AVX512BW-NEXT: xorb %al, %cl
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: icmp_v64i8_v64i1:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vptestnmb %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kshiftrq $32, %k0, %k1
; AVX512VL-NEXT: kxorq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $16, %k0, %k1
; AVX512VL-NEXT: kxorq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $8, %k0, %k1
; AVX512VL-NEXT: kxorq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $4, %k0, %k1
; AVX512VL-NEXT: kxorq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $2, %k0, %k1
; AVX512VL-NEXT: kxorq %k1, %k0, %k0
; AVX512VL-NEXT: kshiftrq $1, %k0, %k1
; AVX512VL-NEXT: kxorq %k1, %k0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
; AVX512VL-NEXT: kmovq %k0, %rax
; AVX512VL-NEXT: movq %rax, %rcx
; AVX512VL-NEXT: shrq $32, %rcx
; AVX512VL-NEXT: xorl %eax, %ecx
; AVX512VL-NEXT: movl %ecx, %eax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: xorl %ecx, %eax
; AVX512VL-NEXT: movl %eax, %ecx
; AVX512VL-NEXT: shrl $8, %ecx
; AVX512VL-NEXT: xorb %al, %cl
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
%a = icmp eq <64 x i8> %0, zeroinitializer