[X86][AVX] Fixed issue where a long chain of shuffles could attempt to combine to a single (illegal) PSHUFB instruction.
It's not enough that we test for SSSE3 - that only covers the 128-bit vector case - we also need to test for AVX2 / AVX512BW for the 256-bit / 512-bit vector cases. llvm-svn: 263239
parent 1cdd7d5448
commit 7ca9614c71
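For orientation, the rule this change enforces can be written as a standalone predicate. This is a minimal sketch in plain C++ with a hypothetical helper name; the real check lives inline in combineX86ShuffleChain and queries the X86 Subtarget:

// Sketch only: variable byte shuffles (PSHUFB and friends) are legal per
// vector width as follows. The flags stand in for Subtarget feature queries.
static bool canUseVariableByteShuffle(unsigned VectorBits, bool HasSSSE3,
                                      bool HasAVX2, bool HasAVX512BW) {
  switch (VectorBits) {
  case 128: return HasSSSE3;    // PSHUFB xmm requires SSSE3
  case 256: return HasAVX2;     // VPSHUFB ymm requires AVX2
  case 512: return HasAVX512BW; // VPSHUFB zmm requires AVX-512BW
  default:  return false;
  }
}

Before this fix only the SSSE3 bit was consulted, so a deep shuffle chain on a 256-bit or 512-bit type could be combined into a VPSHUFB that the target cannot actually encode.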
@@ -23945,7 +23945,10 @@ static bool combineX86ShuffleChain(SDValue Input, SDValue Root,
   // can replace them with a single PSHUFB instruction profitably. Intel's
   // manuals suggest only using PSHUFB if doing so replacing 5 instructions, but
   // in practice PSHUFB tends to be *very* fast so we're more aggressive.
-  if ((Depth >= 3 || HasPSHUFB) && Subtarget.hasSSSE3()) {
+  if ((Depth >= 3 || HasPSHUFB) &&
+      ((VT.is128BitVector() && Subtarget.hasSSSE3()) ||
+       (VT.is256BitVector() && Subtarget.hasAVX2()) ||
+       (VT.is512BitVector() && Subtarget.hasBWI()))) {
     SmallVector<SDValue, 16> PSHUFBMask;
     int NumBytes = VT.getSizeInBits() / 8;
     int Ratio = NumBytes / Mask.size();

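The surrounding context hints at how the byte mask is built: the shuffle mask has one entry per element, while PSHUFB wants one entry per byte, so each mask element is widened by Ratio = NumBytes / Mask.size(). A minimal sketch of that expansion (plain C++, not the LLVM code; it ignores undef/zero sentinels and the fact that VPSHUFB indexes within each 128-bit lane):

#include <cstdint>
#include <vector>

// Widen a per-element shuffle mask into a per-byte mask: source element M
// covers the Ratio consecutive bytes starting at byte M * Ratio.
std::vector<uint8_t> expandToByteMask(const std::vector<int> &Mask,
                                      int NumBytes) {
  int Ratio = NumBytes / static_cast<int>(Mask.size());
  std::vector<uint8_t> ByteMask(NumBytes);
  for (int I = 0; I < NumBytes; ++I) {
    int M = Mask[I / Ratio];                                    // source element
    ByteMask[I] = static_cast<uint8_t>(M * Ratio + I % Ratio);  // source byte
  }
  return ByteMask;
}

For example, a <4 x float> mask <2,0,3,1> with NumBytes = 16 expands to bytes [8,9,10,11, 0,1,2,3, 12,13,14,15, 4,5,6,7], which is exactly the vpshufb pattern the new tests below check for.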
@@ -19,8 +19,8 @@ declare <8 x i32> @llvm.x86.avx.vperm2f128.si.256(<8 x i32>, <8 x i32>, i8)
 declare <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float>, <8 x float>, i8)
 declare <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double>, <4 x double>, i8)
 
-define <4 x float> @combine_vpermilvar_4f32(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32:
+define <4 x float> @combine_vpermilvar_4f32_identity(<4 x float> %a0) {
+; ALL-LABEL: combine_vpermilvar_4f32_identity:
 ; ALL:       # BB#0:
 ; ALL-NEXT:    retq
   %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
@@ -28,8 +28,8 @@ define <4 x float> @combine_vpermilvar_4f32(<4 x float> %a0) {
   ret <4 x float> %2
 }
 
-define <8 x float> @combine_vpermilvar_8f32(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_8f32:
+define <8 x float> @combine_vpermilvar_8f32_identity(<8 x float> %a0) {
+; ALL-LABEL: combine_vpermilvar_8f32_identity:
 ; ALL:       # BB#0:
 ; ALL-NEXT:    retq
   %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 1>)
@@ -37,8 +37,8 @@ define <8 x float> @combine_vpermilvar_8f32(<8 x float> %a0) {
   ret <8 x float> %2
 }
 
-define <2 x double> @combine_vpermilvar_2f64(<2 x double> %a0) {
-; ALL-LABEL: combine_vpermilvar_2f64:
+define <2 x double> @combine_vpermilvar_2f64_identity(<2 x double> %a0) {
+; ALL-LABEL: combine_vpermilvar_2f64_identity:
 ; ALL:       # BB#0:
 ; ALL-NEXT:    retq
   %1 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> <i64 2, i64 0>)
@@ -46,11 +46,49 @@ define <2 x double> @combine_vpermilvar_2f64(<2 x double> %a0) {
   ret <2 x double> %2
 }
 
-define <4 x double> @combine_vpermilvar_4f64(<4 x double> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f64:
+define <4 x double> @combine_vpermilvar_4f64_identity(<4 x double> %a0) {
+; ALL-LABEL: combine_vpermilvar_4f64_identity:
 ; ALL:       # BB#0:
 ; ALL-NEXT:    retq
   %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
   %2 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %1, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
   ret <4 x double> %2
 }
+
+define <4 x float> @combine_vpermilvar_4f32_4stage(<4 x float> %a0) {
+; ALL-LABEL: combine_vpermilvar_4f32_4stage:
+; ALL:       # BB#0:
+; ALL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9,10,11,0,1,2,3,12,13,14,15,4,5,6,7]
+; ALL-NEXT:    retq
+  %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
+  %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> <i32 2, i32 3, i32 0, i32 1>)
+  %3 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>)
+  %4 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %3, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
+  ret <4 x float> %4
+}
+
+define <8 x float> @combine_vpermilvar_8f32_4stage(<8 x float> %a0) {
+; AVX1-LABEL: combine_vpermilvar_8f32_4stage:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [3,2,1,0,3,2,1,0]
+; AVX1-NEXT:    vpermilps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
+; AVX1-NEXT:    vpermilps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: combine_vpermilvar_8f32_4stage:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[8,9,10,11,0,1,2,3,12,13,14,15,4,5,6,7,24,25,26,27,16,17,18,19,28,29,30,31,20,21,22,23]
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: combine_vpermilvar_8f32_4stage:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[8,9,10,11,0,1,2,3,12,13,14,15,4,5,6,7,24,25,26,27,16,17,18,19,28,29,30,31,20,21,22,23]
+; AVX512F-NEXT:    retq
+  %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
+  %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>)
+  %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %2, <8 x i32> <i32 0, i32 2, i32 1, i32 3, i32 0, i32 2, i32 1, i32 3>)
+  %4 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %3, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
+  ret <8 x float> %4
+}
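The 4-stage tests work because the combiner composes the chained masks into one. Composition itself is simple; here is a sketch in plain C++, not the LLVM code:

#include <cstddef>
#include <vector>

// Applying First and then Second to a vector is the same as applying
// Composed, where Composed[I] = First[Second[I]].
std::vector<int> composeMasks(const std::vector<int> &First,
                              const std::vector<int> &Second) {
  std::vector<int> Composed(Second.size());
  for (std::size_t I = 0; I < Second.size(); ++I)
    Composed[I] = First[Second[I]];
  return Composed;
}

For combine_vpermilvar_4f32_4stage, composing <3,2,1,0>, <2,3,0,1>, <0,2,1,3> and <3,2,1,0> yields <2,0,3,1>, i.e. the single vpshufb xmm0[8,9,10,11,0,1,2,3,12,13,14,15,4,5,6,7] in the CHECK lines. In the 8 x float case the AVX2 and AVX512F targets fold to one ymm vpshufb, while AVX1 (which lacks ymm VPSHUFB) keeps the vpermilps sequence - exactly the per-width legality distinction the code change encodes.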