[X86][SSE] Add PACKSS support for truncation of clamped values

Follow-up to D42544 that matches PACKSSWB cases for non-AVX512 targets. SSE and PACKSSDW cases will have to wait until we can add support for general SMIN/SMAX matching.

llvm-svn: 324339
Simon Pilgrim 2018-02-06 12:16:10 +00:00
parent 022765de66
commit 90a237bf83
2 changed files with 15 additions and 49 deletions
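
For context, the pattern this commit targets is a vector that has already been clamped to the signed i8 range (smin/smax against 127 and -128) and is then truncated; PACKSSWB performs exactly that signed-saturating narrowing in one instruction. A minimal scalar sketch of the per-lane semantics, in illustrative C++ that is not part of the patch (the helper name packsswb_lane is made up here):

#include <algorithm>
#include <cstdint>

// Per-lane behaviour of PACKSSWB: narrow an i16 to an i8 with signed
// saturation, i.e. clamp to [-128, 127] and then truncate. The explicit
// smin/smax form of this clamp is what detectSSatPattern recognises.
static int8_t packsswb_lane(int16_t x) {
  int16_t clamped = std::min<int16_t>(std::max<int16_t>(x, -128), 127);
  return static_cast<int8_t>(clamped);
}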


@@ -34032,15 +34032,21 @@ static SDValue detectAVX512USatPattern(SDValue In, EVT VT,
 static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
                                       SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget) {
+  EVT InVT = In.getValueType();
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  if (!TLI.isTypeLegal(In.getValueType()) || !TLI.isTypeLegal(VT))
+  if (!TLI.isTypeLegal(InVT) || !TLI.isTypeLegal(VT))
     return SDValue();
-  if (isSATValidOnAVX512Subtarget(In.getValueType(), VT, Subtarget)) {
+  if (isSATValidOnAVX512Subtarget(InVT, VT, Subtarget)) {
     if (auto SSatVal = detectSSatPattern(In, VT))
       return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
     if (auto USatVal = detectUSatPattern(In, VT))
       return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
   }
+  if (VT.getScalarType() == MVT::i8 && InVT.getScalarType() == MVT::i16) {
+    if (auto SSatVal = detectSSatPattern(In, VT))
+      return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
+                                    Subtarget);
+  }
   return SDValue();
 }
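
Once the clamp is recognised, truncateVectorWithPACK can narrow each 256-bit source with a single PACKSSWB of its two 128-bit halves, which is what the updated test checks below reflect (vextract*128 followed by vpacksswb). A rough scalar model of that pack step, again illustrative C++ rather than the actual DAG lowering (packsswb128 is a name invented for this sketch):

#include <algorithm>
#include <array>
#include <cstdint>

// Rough model of one 128-bit PACKSSWB: two 8 x i16 inputs are narrowed,
// with signed saturation, into one 16 x i8 result (low input fills lanes
// 0-7, high input fills lanes 8-15).
static std::array<int8_t, 16> packsswb128(const std::array<int16_t, 8> &Lo,
                                          const std::array<int16_t, 8> &Hi) {
  std::array<int8_t, 16> Out{};
  for (int I = 0; I != 8; ++I) {
    Out[I] = static_cast<int8_t>(
        std::min<int16_t>(std::max<int16_t>(Lo[I], -128), 127));
    Out[I + 8] = static_cast<int8_t>(
        std::min<int16_t>(std::max<int16_t>(Hi[I], -128), 127));
  }
  return Out;
}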


@@ -3274,28 +3274,14 @@ define <16 x i8> @trunc_ssat_v16i16_v16i8(<16 x i16> %a0) {
 ; AVX1-LABEL: trunc_ssat_v16i16_v16i8:
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127]
-; AVX1-NEXT: vpminsw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpminsw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [65408,65408,65408,65408,65408,65408,65408,65408]
-; AVX1-NEXT: vpmaxsw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpmaxsw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: trunc_ssat_v16i16_v16i8:
 ; AVX2: # %bb.0:
-; AVX2-NEXT: vpminsw {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpmaxsw {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
@@ -3409,44 +3395,18 @@ define <32 x i8> @trunc_ssat_v32i16_v32i8(<32 x i16> %a0) {
 ; AVX1-LABEL: trunc_ssat_v32i16_v32i8:
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127]
-; AVX1-NEXT: vpminsw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpminsw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpminsw %xmm3, %xmm4, %xmm4
-; AVX1-NEXT: vpminsw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [65408,65408,65408,65408,65408,65408,65408,65408]
-; AVX1-NEXT: vpmaxsw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpmaxsw %xmm3, %xmm4, %xmm4
-; AVX1-NEXT: vpmaxsw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpmaxsw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm2
-; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: trunc_ssat_v32i16_v32i8:
 ; AVX2: # %bb.0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX2-NEXT: vpminsw %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpminsw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408]
-; AVX2-NEXT: vpmaxsw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpmaxsw %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;