[X86] Add DAG combine to form saturating VTRUNCUS/VTRUNCS from VTRUNC
We already do this for ISD::TRUNCATE, but we can do the same for X86ISD::VTRUNC.

Differential Revision: https://reviews.llvm.org/D68432

llvm-svn: 373765
commit 074fa390d2
parent 4380647e79
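To make the intent concrete, here is a minimal, illustrative LLVM IR sketch in the spirit of the trunc_usat tests updated below; the function name is invented for this example and is not part of the patch. It clamps each lane to 255 and then truncates. Once the truncate has been lowered to an X86ISD::VTRUNC node, the new combine can recognize the clamp and form a saturating truncate, so on an AVX512VL target the vpminud + vpmovdb pair collapses into a single vpmovusdb, as the updated trunc_usat_v8i32_v8i8 checks show.

; Hypothetical example (not part of the patch); same shape as the
; existing trunc_usat_v8i32_v8i8 test.
define <8 x i8> @usat_trunc_example(<8 x i32> %x) {
  ; Clamp each lane to at most 255 ...
  %lt = icmp ult <8 x i32> %x, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
  %clamped = select <8 x i1> %lt, <8 x i32> %x, <8 x i32> <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
  ; ... then truncate. This is the truncate that X86 lowers to
  ; X86ISD::VTRUNC and that can now fold into X86ISD::VTRUNCUS.
  %trunc = trunc <8 x i32> %clamped to <8 x i8>
  ret <8 x i8> %trunc
}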
@@ -41145,6 +41145,19 @@ static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
   return combineVectorTruncation(N, DAG, Subtarget);
 }
 
+static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG) {
+  EVT VT = N->getValueType(0);
+  SDValue In = N->getOperand(0);
+  SDLoc DL(N);
+
+  if (auto SSatVal = detectSSatPattern(In, VT))
+    return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
+  if (auto USatVal = detectUSatPattern(In, VT, DAG, DL))
+    return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
+
+  return SDValue();
+}
+
 /// Returns the negated value if the node \p N flips sign of FP value.
 ///
 /// FP-negation node may have different forms: FNEG(x), FXOR (x, 0x80000000)
@@ -44503,6 +44516,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
   case ISD::FNEG: return combineFneg(N, DAG, Subtarget);
   case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
+  case X86ISD::VTRUNC: return combineVTRUNC(N, DAG);
   case X86ISD::ANDNP: return combineAndnp(N, DAG, DCI, Subtarget);
   case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
   case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);

@@ -750,8 +750,7 @@ define <16 x i8> @usat_trunc_db_256(<8 x i32> %x) {
 ;
 ; SKX-LABEL: usat_trunc_db_256:
 ; SKX: ## %bb.0:
-; SKX-NEXT: vpminud {{.*}}(%rip){1to8}, %ymm0, %ymm0
-; SKX-NEXT: vpmovdb %ymm0, %xmm0
+; SKX-NEXT: vpmovusdb %ymm0, %xmm0
 ; SKX-NEXT: vzeroupper
 ; SKX-NEXT: retq
 %tmp1 = icmp ult <8 x i32> %x, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>

@@ -1432,9 +1432,7 @@ define void @truncstore_v8i64_v8i8(<8 x i64> %x, <8 x i8>* %p, <8 x i32> %mask)
 ; AVX512F: # %bb.0:
 ; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
 ; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0
-; AVX512F-NEXT: vpminsq {{.*}}(%rip){1to8}, %zmm0, %zmm0
-; AVX512F-NEXT: vpmaxsq {{.*}}(%rip){1to8}, %zmm0, %zmm0
-; AVX512F-NEXT: vpmovqb %zmm0, %xmm0
+; AVX512F-NEXT: vpmovsqb %zmm0, %xmm0
 ; AVX512F-NEXT: kmovw %k0, %eax
 ; AVX512F-NEXT: testb $1, %al
 ; AVX512F-NEXT: jne .LBB2_1

@@ -1215,8 +1215,7 @@ define void @truncstore_v8i64_v8i8(<8 x i64> %x, <8 x i8>* %p, <8 x i32> %mask)
 ; AVX512F: # %bb.0:
 ; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
 ; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0
-; AVX512F-NEXT: vpminuq {{.*}}(%rip){1to8}, %zmm0, %zmm0
-; AVX512F-NEXT: vpmovqb %zmm0, %xmm0
+; AVX512F-NEXT: vpmovusqb %zmm0, %xmm0
 ; AVX512F-NEXT: kmovw %k0, %eax
 ; AVX512F-NEXT: testb $1, %al
 ; AVX512F-NEXT: jne .LBB2_1

@@ -1658,10 +1658,9 @@ define <8 x i8> @trunc_packus_v8i64_v8i8(<8 x i64> %a0) {
 ;
 ; AVX512-LABEL: trunc_packus_v8i64_v8i8:
 ; AVX512: # %bb.0:
-; AVX512-NEXT: vpminsq {{.*}}(%rip){1to8}, %zmm0, %zmm0
 ; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovqb %zmm0, %xmm0
+; AVX512-NEXT: vpmovusqb %zmm0, %xmm0
 ; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: retq
 %1 = icmp slt <8 x i64> %a0, <i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255>

@@ -2790,10 +2789,9 @@ define <8 x i8> @trunc_packus_v8i32_v8i8(<8 x i32> %a0) {
 ;
 ; AVX512VL-LABEL: trunc_packus_v8i32_v8i8:
 ; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpminsd {{.*}}(%rip){1to8}, %ymm0, %ymm0
 ; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; AVX512VL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
-; AVX512VL-NEXT: vpmovdb %ymm0, %xmm0
+; AVX512VL-NEXT: vpmovusdb %ymm0, %xmm0
 ; AVX512VL-NEXT: vzeroupper
 ; AVX512VL-NEXT: retq
 ;

@@ -2809,10 +2807,9 @@ define <8 x i8> @trunc_packus_v8i32_v8i8(<8 x i32> %a0) {
 ;
 ; AVX512BWVL-LABEL: trunc_packus_v8i32_v8i8:
 ; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpminsd {{.*}}(%rip){1to8}, %ymm0, %ymm0
 ; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BWVL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovdb %ymm0, %xmm0
+; AVX512BWVL-NEXT: vpmovusdb %ymm0, %xmm0
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
 %1 = icmp slt <8 x i32> %a0, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>

@@ -1567,9 +1567,7 @@ define <8 x i8> @trunc_ssat_v8i64_v8i8(<8 x i64> %a0) {
 ;
 ; AVX512-LABEL: trunc_ssat_v8i64_v8i8:
 ; AVX512: # %bb.0:
-; AVX512-NEXT: vpminsq {{.*}}(%rip){1to8}, %zmm0, %zmm0
-; AVX512-NEXT: vpmaxsq {{.*}}(%rip){1to8}, %zmm0, %zmm0
-; AVX512-NEXT: vpmovqb %zmm0, %xmm0
+; AVX512-NEXT: vpmovsqb %zmm0, %xmm0
 ; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: retq
 %1 = icmp slt <8 x i64> %a0, <i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127>

@@ -2776,9 +2774,7 @@ define <8 x i8> @trunc_ssat_v8i32_v8i8(<8 x i32> %a0) {
 ;
 ; AVX512VL-LABEL: trunc_ssat_v8i32_v8i8:
 ; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpminsd {{.*}}(%rip){1to8}, %ymm0, %ymm0
-; AVX512VL-NEXT: vpmaxsd {{.*}}(%rip){1to8}, %ymm0, %ymm0
-; AVX512VL-NEXT: vpmovdb %ymm0, %xmm0
+; AVX512VL-NEXT: vpmovsdb %ymm0, %xmm0
 ; AVX512VL-NEXT: vzeroupper
 ; AVX512VL-NEXT: retq
 ;

@@ -2794,9 +2790,7 @@ define <8 x i8> @trunc_ssat_v8i32_v8i8(<8 x i32> %a0) {
 ;
 ; AVX512BWVL-LABEL: trunc_ssat_v8i32_v8i8:
 ; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpminsd {{.*}}(%rip){1to8}, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmaxsd {{.*}}(%rip){1to8}, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovdb %ymm0, %xmm0
+; AVX512BWVL-NEXT: vpmovsdb %ymm0, %xmm0
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
 %1 = icmp slt <8 x i32> %a0, <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>

@@ -1175,8 +1175,7 @@ define <8 x i8> @trunc_usat_v8i64_v8i8(<8 x i64> %a0) {
 ;
 ; AVX512-LABEL: trunc_usat_v8i64_v8i8:
 ; AVX512: # %bb.0:
-; AVX512-NEXT: vpminuq {{.*}}(%rip){1to8}, %zmm0, %zmm0
-; AVX512-NEXT: vpmovqb %zmm0, %xmm0
+; AVX512-NEXT: vpmovusqb %zmm0, %xmm0
 ; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: retq
 %1 = icmp ult <8 x i64> %a0, <i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255>

@@ -1952,8 +1951,7 @@ define <8 x i8> @trunc_usat_v8i32_v8i8(<8 x i32> %a0) {
 ;
 ; AVX512VL-LABEL: trunc_usat_v8i32_v8i8:
 ; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpminud {{.*}}(%rip){1to8}, %ymm0, %ymm0
-; AVX512VL-NEXT: vpmovdb %ymm0, %xmm0
+; AVX512VL-NEXT: vpmovusdb %ymm0, %xmm0
 ; AVX512VL-NEXT: vzeroupper
 ; AVX512VL-NEXT: retq
 ;

@@ -1967,8 +1965,7 @@ define <8 x i8> @trunc_usat_v8i32_v8i8(<8 x i32> %a0) {
 ;
 ; AVX512BWVL-LABEL: trunc_usat_v8i32_v8i8:
 ; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpminud {{.*}}(%rip){1to8}, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovdb %ymm0, %xmm0
+; AVX512BWVL-NEXT: vpmovusdb %ymm0, %xmm0
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
 %1 = icmp ult <8 x i32> %a0, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>