[x86] add tests for shift-trunc-shift; NFC

More coverage for a possible generic transform.
Author: Sanjay Patel
Date: 2019-12-12 18:19:57 -05:00
Parent: 99581fd4c8
Commit: dc9e6ba90b
2 changed files with 99 additions and 0 deletions
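For context, the "possible generic transform" these tests target appears to be folding the two right-shifts across the truncate into a single shift plus a mask. A sketch of the fold inferred from the tests (an assumption about the intended follow-up combine, not part of this commit):

; hypothetical fold, assuming C1 + C2 is less than the source bit width:
;   lshr (trunc (lshr %x, C1)), C2
;     --> and (trunc (lshr %x, C1 + C2)), (1 << (destBits - C2)) - 1
; the mask clears the bits that truncating before the second shift would have discarded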


@@ -1552,3 +1552,25 @@ define i64 @reg64_lshr_by_masked_negated_unfolded_add_b(i64 %val, i64 %a, i64 %b
%shifted = lshr i64 %val, %negaaddbitwidthaddb
ret i64 %shifted
}

define i16 @sh_trunc_sh(i64 %x) {
; X32-LABEL: sh_trunc_sh:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shrl $4, %eax
; X32-NEXT: andl $15, %eax
; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; X64-LABEL: sh_trunc_sh:
; X64: # %bb.0:
; X64-NEXT: shrq $24, %rdi
; X64-NEXT: movzwl %di, %eax
; X64-NEXT: shrl $12, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%s = lshr i64 %x, 24
%t = trunc i64 %s to i16
%r = lshr i16 %t, 12
ret i16 %r
}
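Note that the X32 output above already reaches the folded form via i64 legalization: loading the high dword plus "shrl $4" gives the combined 36-bit shift, and "andl $15" supplies the mask. A hand-folded IR equivalent of the test (hypothetical name, for illustration only):

define i16 @sh_trunc_sh_folded(i64 %x) {
  %s = lshr i64 %x, 36        ; combined shift amount: 24 + 12
  %t = trunc i64 %s to i16
  %r = and i16 %t, 15         ; (1 << (16 - 12)) - 1 keeps the 4 live bits
  ret i16 %r
}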


@@ -1394,3 +1394,80 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
%shift = lshr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
}

define <4 x i32> @sh_trunc_sh_vec(<4 x i64> %x) {
; AVX1-LABEL: sh_trunc_sh_vec:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlq $24, %xmm1, %xmm1
; AVX1-NEXT: vpsrlq $24, %xmm0, %xmm0
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vpsrld $12, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: sh_trunc_sh_vec:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlq $24, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX2-NEXT: vpsrld $12, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: sh_trunc_sh_vec:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vpperm {{.*#+}} xmm0 = xmm0[3,4,5,6,11,12,13,14],xmm1[3,4,5,6,11,12,13,14]
; XOPAVX1-NEXT: vpsrld $12, %xmm0, %xmm0
; XOPAVX1-NEXT: vzeroupper
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: sh_trunc_sh_vec:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlq $24, %ymm0, %ymm0
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; XOPAVX2-NEXT: vpsrld $12, %xmm0, %xmm0
; XOPAVX2-NEXT: vzeroupper
; XOPAVX2-NEXT: retq
;
; AVX512-LABEL: sh_trunc_sh_vec:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlq $24, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpsrld $12, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: sh_trunc_sh_vec:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlq $24, %ymm0, %ymm0
; AVX512VL-NEXT: vpmovqd %ymm0, %xmm0
; AVX512VL-NEXT: vpsrld $12, %xmm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; X32-AVX1-LABEL: sh_trunc_sh_vec:
; X32-AVX1: # %bb.0:
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X32-AVX1-NEXT: vpsrlq $24, %xmm1, %xmm1
; X32-AVX1-NEXT: vpsrlq $24, %xmm0, %xmm0
; X32-AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-AVX1-NEXT: vpsrld $12, %xmm0, %xmm0
; X32-AVX1-NEXT: vzeroupper
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: sh_trunc_sh_vec:
; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsrlq $24, %ymm0, %ymm0
; X32-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; X32-AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-AVX2-NEXT: vpsrld $12, %xmm0, %xmm0
; X32-AVX2-NEXT: vzeroupper
; X32-AVX2-NEXT: retl
%s = lshr <4 x i64> %x, <i64 24, i64 24, i64 24, i64 24>
%t = trunc <4 x i64> %s to <4 x i32>
%r = lshr <4 x i32> %t, <i32 12, i32 12, i32 12, i32 12>
ret <4 x i32> %r
}
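The vector test would fold the same way; for a <4 x i32> destination and an outer shift of 12, the compensating mask is (1 << (32 - 12)) - 1 = 1048575. A hand-folded equivalent (hypothetical name, for illustration only):

define <4 x i32> @sh_trunc_sh_vec_folded(<4 x i64> %x) {
  %s = lshr <4 x i64> %x, <i64 36, i64 36, i64 36, i64 36>
  %t = trunc <4 x i64> %s to <4 x i32>
  %r = and <4 x i32> %t, <i32 1048575, i32 1048575, i32 1048575, i32 1048575>
  ret <4 x i32> %r
}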