[X86][SSE] Add cpu feature for aggressive combining to variable shuffles
As mentioned in D38318 and D40865, modern Intel processors prefer to combine multiple shuffles into a single variable shuffle mask (PSHUFB/VPERMPS etc.) instead of multi-stage 'fixed' shuffles, which put more pressure on Port 5 (at the expense of extra shuffle mask loads).

This patch provides a FeatureFastVariableShuffle target flag for Haswell+ CPUs that prefers combining 2 or more fixed shuffles into a single variable shuffle (the default threshold is 3 shuffles).

The long-term aim is to drive more of this from schedule data (probably via the MC), but we're not close to being ready for that yet.

Differential Revision: https://reviews.llvm.org/D41323

llvm-svn: 321074
This commit is contained in: commit fd5df639a3 (parent b536a2a5ba)
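As a quick way to exercise the new flag (a sketch, assuming an LLVM build that contains this patch; the input file name is hypothetical), the feature is enabled implicitly for Haswell and later CPUs via HSWFeatures, or explicitly through the new subtarget attribute, mirroring the RUN lines added to the tests below:

  llc -mtriple=x86_64-unknown-unknown -mcpu=haswell shuffles.ll
  llc -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle shuffles.ll

The -mattr form is what the updated tests use to cover both the 'slow' (feature off) and 'fast' (feature on) code paths.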
@@ -263,6 +263,12 @@ def FeatureSlowIncDec : SubtargetFeature<"slow-incdec", "SlowIncDec", "true",
 def FeatureSoftFloat
     : SubtargetFeature<"soft-float", "UseSoftFloat", "true",
                        "Use software floating point features.">;
+// On recent X86 (port bound) processors, its preferable to combine to a single shuffle
+// using a variable mask over multiple fixed shuffles.
+def FeatureFastVariableShuffle
+    : SubtargetFeature<"fast-variable-shuffle",
+                       "HasFastVariableShuffle",
+                       "true", "Shuffles with variable masks are fast">;
 // On some X86 processors, there is no performance hazard to writing only the
 // lower parts of a YMM or ZMM register without clearing the upper part.
 def FeatureFastPartialYMMorZMMWrite
@@ -620,7 +626,8 @@ def HSWFeatures : ProcessorFeatures<IVBFeatures.Value, [
   FeatureERMSB,
   FeatureFMA,
   FeatureLZCNT,
-  FeatureMOVBE
+  FeatureMOVBE,
+  FeatureFastVariableShuffle
 ]>;
 
 class HaswellProc<string Name> : ProcModel<Name, HaswellModel,
@@ -28592,8 +28592,8 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
     return SDValue();
 
   // Depth threshold above which we can efficiently use variable mask shuffles.
-  // TODO This should probably be target specific.
-  bool AllowVariableMask = (Depth >= 3) || HasVariableMask;
+  int VariableShuffleDepth = Subtarget.hasFastVariableShuffle() ? 2 : 3;
+  bool AllowVariableMask = (Depth >= VariableShuffleDepth) || HasVariableMask;
 
   bool MaskContainsZeros =
       any_of(Mask, [](int M) { return M == SM_SentinelZero; });
@@ -228,6 +228,10 @@ protected:
   /// the stack pointer. This is an optimization for Intel Atom processors.
   bool UseLeaForSP;
 
+  /// True if its preferable to combine to a single shuffle using a variable
+  /// mask over multiple fixed shuffles.
+  bool HasFastVariableShuffle;
+
   /// True if there is no performance penalty to writing only the lower parts
   /// of a YMM or ZMM register without clearing the upper part.
   bool HasFastPartialYMMorZMMWrite;
@@ -527,6 +531,9 @@ public:
   bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
   bool hasCmpxchg16b() const { return HasCmpxchg16b; }
   bool useLeaForSP() const { return UseLeaForSP; }
+  bool hasFastVariableShuffle() const {
+    return HasFastVariableShuffle;
+  }
   bool hasFastPartialYMMorZMMWrite() const {
     return HasFastPartialYMMorZMMWrite;
   }
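To make the intended effect concrete, here is a minimal worked example distilled from the first v16i8 test hunk below (the function name is shortened here and is hypothetical; the IR and the expected instruction sequences are taken from the test diff):

  define <16 x i8> @repeat_lo_bytes(<16 x i8> %a, <16 x i8> %b) {
    %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b,
        <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1,
                    i32 2, i32 2, i32 2, i32 2, i32 3, i32 3, i32 3, i32 3>
    ret <16 x i8> %shuffle
  }

Without the feature (AVX2-SLOW) this lowers to two fixed shuffles, vpunpcklbw followed by vpunpcklwd; with +fast-variable-shuffle (AVX2-FAST) the two-shuffle chain is combined into a single variable-mask vpshufb, trading a shuffle mask load for less Port 5 pressure.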
@@ -3,8 +3,9 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1OR2 --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1OR2 --check-prefix=AVX2OR512VL --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2OR512VL --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1OR2 --check-prefix=AVX2OR512VL --check-prefix=AVX2 --check-prefix=AVX2-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1OR2 --check-prefix=AVX2OR512VL --check-prefix=AVX2 --check-prefix=AVX2-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw,+fast-variable-shuffle | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2OR512VL --check-prefix=AVX512VL
 
 define <16 x i8> @shuffle_v16i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<16 x i8> %a, <16 x i8> %b) {
 ; SSE2-LABEL: shuffle_v16i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
@@ -115,11 +116,28 @@ define <16 x i8> @shuffle_v16i8_00_00_00_00_01_01_01_01_02_02_02_02_03_03_03_03(
 ; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: shuffle_v16i8_00_00_00_00_01_01_01_01_02_02_02_02_03_03_03_03:
-; AVX: # %bb.0:
-; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; AVX-NEXT: retq
+; AVX1-LABEL: shuffle_v16i8_00_00_00_00_01_01_01_01_02_02_02_02_03_03_03_03:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: shuffle_v16i8_00_00_00_00_01_01_01_01_02_02_02_02_03_03_03_03:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: shuffle_v16i8_00_00_00_00_01_01_01_01_02_02_02_02_03_03_03_03:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3]
+; AVX2-FAST-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v16i8_00_00_00_00_01_01_01_01_02_02_02_02_03_03_03_03:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512VL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; AVX512VL-NEXT: retq
 %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 2, i32 2, i32 2, i32 2, i32 3, i32 3, i32 3, i32 3>
 ret <16 x i8> %shuffle
 }
@@ -131,11 +149,28 @@ define <16 x i8> @shuffle_v16i8_04_04_04_04_05_05_05_05_06_06_06_06_07_07_07_07(
 ; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: shuffle_v16i8_04_04_04_04_05_05_05_05_06_06_06_06_07_07_07_07:
-; AVX: # %bb.0:
-; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
-; AVX-NEXT: retq
+; AVX1-LABEL: shuffle_v16i8_04_04_04_04_05_05_05_05_06_06_06_06_07_07_07_07:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: shuffle_v16i8_04_04_04_04_05_05_05_05_06_06_06_06_07_07_07_07:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: shuffle_v16i8_04_04_04_04_05_05_05_05_06_06_06_06_07_07_07_07:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,4,4,4,5,5,5,5,6,6,6,6,7,7,7,7]
+; AVX2-FAST-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v16i8_04_04_04_04_05_05_05_05_06_06_06_06_07_07_07_07:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512VL-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
+; AVX512VL-NEXT: retq
 %shuffle = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 5, i32 5, i32 5, i32 5, i32 6, i32 6, i32 6, i32 6, i32 7, i32 7, i32 7, i32 7>
 ret <16 x i8> %shuffle
 }
@@ -3,8 +3,10 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1OR2 --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1OR2 --check-prefix=AVX2OR512VL --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2OR512VL --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1OR2 --check-prefix=AVX2OR512VL --check-prefix=AVX2 --check-prefix=AVX2-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1OR2 --check-prefix=AVX2OR512VL --check-prefix=AVX2 --check-prefix=AVX2-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2OR512VL --check-prefix=AVX512VL --check-prefix=AVX512VL-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw,+fast-variable-shuffle | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2OR512VL --check-prefix=AVX512VL --check-prefix=AVX512VL-FAST
 
 define <8 x i16> @shuffle_v8i16_01012323(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: shuffle_v8i16_01012323:
@@ -85,11 +87,33 @@ define <8 x i16> @shuffle_v8i16_00004444(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: shuffle_v8i16_00004444:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
-; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
-; AVX-NEXT: retq
+; AVX1-LABEL: shuffle_v8i16_00004444:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: shuffle_v8i16_00004444:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: shuffle_v8i16_00004444:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,8,9,8,9,8,9,8,9]
+; AVX2-FAST-NEXT: retq
+;
+; AVX512VL-SLOW-LABEL: shuffle_v8i16_00004444:
+; AVX512VL-SLOW: # %bb.0:
+; AVX512VL-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX512VL-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; AVX512VL-SLOW-NEXT: retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v8i16_00004444:
+; AVX512VL-FAST: # %bb.0:
+; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,8,9,8,9,8,9,8,9]
+; AVX512VL-FAST-NEXT: retq
 %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
 ret <8 x i16> %shuffle
 }
@@ -126,11 +150,28 @@ define <8 x i16> @shuffle_v8i16_31206745(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: shuffle_v8i16_31206745:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
-; AVX-NEXT: retq
+; AVX1-LABEL: shuffle_v8i16_31206745:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: shuffle_v8i16_31206745:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: shuffle_v8i16_31206745:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,7,2,3,4,5,0,1,12,13,14,15,8,9,10,11]
+; AVX2-FAST-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v8i16_31206745:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,0,4,5,6,7]
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
+; AVX512VL-NEXT: retq
 %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 1, i32 2, i32 0, i32 6, i32 7, i32 4, i32 5>
 ret <8 x i16> %shuffle
 }
@@ -179,11 +220,28 @@ define <8 x i16> @shuffle_v8i16_23026745(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: shuffle_v8i16_23026745:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,3,0,2,4,5,6,7]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
-; AVX-NEXT: retq
+; AVX1-LABEL: shuffle_v8i16_23026745:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,3,0,2,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: shuffle_v8i16_23026745:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,3,0,2,4,5,6,7]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: shuffle_v8i16_23026745:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,6,7,0,1,4,5,12,13,14,15,8,9,10,11]
+; AVX2-FAST-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v8i16_23026745:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,3,0,2,4,5,6,7]
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
+; AVX512VL-NEXT: retq
 %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 2, i32 3, i32 0, i32 2, i32 6, i32 7, i32 4, i32 5>
 ret <8 x i16> %shuffle
 }
@@ -194,11 +252,33 @@ define <8 x i16> @shuffle_v8i16_23016747(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,4,7]
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: shuffle_v8i16_23016747:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
-; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,4,7]
-; AVX-NEXT: retq
+; AVX1-LABEL: shuffle_v8i16_23016747:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,4,7]
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: shuffle_v8i16_23016747:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
+; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,4,7]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: shuffle_v8i16_23016747:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,6,7,0,1,2,3,12,13,14,15,8,9,14,15]
+; AVX2-FAST-NEXT: retq
+;
+; AVX512VL-SLOW-LABEL: shuffle_v8i16_23016747:
+; AVX512VL-SLOW: # %bb.0:
+; AVX512VL-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
+; AVX512VL-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,4,7]
+; AVX512VL-SLOW-NEXT: retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v8i16_23016747:
+; AVX512VL-FAST: # %bb.0:
+; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,6,7,0,1,2,3,12,13,14,15,8,9,14,15]
+; AVX512VL-FAST-NEXT: retq
 %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 7>
 ret <8 x i16> %shuffle
 }
@@ -597,11 +677,33 @@ define <8 x i16> @shuffle_v8i16_04404567(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,0,4,5,6,7]
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: shuffle_v8i16_04404567:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,0,4,5,6,7]
-; AVX-NEXT: retq
+; AVX1-LABEL: shuffle_v8i16_04404567:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: shuffle_v8i16_04404567:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: shuffle_v8i16_04404567:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,8,9,10,11,12,13,14,15]
+; AVX2-FAST-NEXT: retq
+;
+; AVX512VL-SLOW-LABEL: shuffle_v8i16_04404567:
+; AVX512VL-SLOW: # %bb.0:
+; AVX512VL-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX512VL-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,0,4,5,6,7]
+; AVX512VL-SLOW-NEXT: retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v8i16_04404567:
+; AVX512VL-FAST: # %bb.0:
+; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,0,1,8,9,10,11,12,13,14,15]
+; AVX512VL-FAST-NEXT: retq
 %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 4, i32 4, i32 0, i32 4, i32 5, i32 6, i32 7>
 ret <8 x i16> %shuffle
 }
@@ -705,12 +807,18 @@ define <8 x i16> @shuffle_v8i16_0127XXXX(<8 x i16> %a, <8 x i16> %b) {
 ; AVX1OR2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,14,15,4,5,14,15,12,13,14,15]
 ; AVX1OR2-NEXT: retq
 ;
-; AVX512VL-LABEL: shuffle_v8i16_0127XXXX:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512VL-NEXT: retq
+; AVX512VL-SLOW-LABEL: shuffle_v8i16_0127XXXX:
+; AVX512VL-SLOW: # %bb.0:
+; AVX512VL-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX512VL-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
+; AVX512VL-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX512VL-SLOW-NEXT: retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v8i16_0127XXXX:
+; AVX512VL-FAST: # %bb.0:
+; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,8,9,10,11,4,5,14,15,12,13,14,15]
+; AVX512VL-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX512VL-FAST-NEXT: retq
 %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
 ret <8 x i16> %shuffle
 }
@@ -738,12 +846,18 @@ define <8 x i16> @shuffle_v8i16_XXXX4563(<8 x i16> %a, <8 x i16> %b) {
 ; AVX1OR2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,13,6,7,4,5,6,7,8,9,10,11,12,13,6,7]
 ; AVX1OR2-NEXT: retq
 ;
-; AVX512VL-LABEL: shuffle_v8i16_XXXX4563:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
-; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
-; AVX512VL-NEXT: retq
+; AVX512VL-SLOW-LABEL: shuffle_v8i16_XXXX4563:
+; AVX512VL-SLOW: # %bb.0:
+; AVX512VL-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
+; AVX512VL-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
+; AVX512VL-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; AVX512VL-SLOW-NEXT: retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v8i16_XXXX4563:
+; AVX512VL-FAST: # %bb.0:
+; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,13,6,7,4,5,6,7,8,9,10,11,0,1,2,3]
+; AVX512VL-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; AVX512VL-FAST-NEXT: retq
 %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 4, i32 5, i32 6, i32 3>
 ret <8 x i16> %shuffle
 }
@@ -771,12 +885,18 @@ define <8 x i16> @shuffle_v8i16_4563XXXX(<8 x i16> %a, <8 x i16> %b) {
 ; AVX1OR2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,6,7,8,9,10,11,0,1,2,3]
 ; AVX1OR2-NEXT: retq
 ;
-; AVX512VL-LABEL: shuffle_v8i16_4563XXXX:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
-; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,0,2,3]
-; AVX512VL-NEXT: retq
+; AVX512VL-SLOW-LABEL: shuffle_v8i16_4563XXXX:
+; AVX512VL-SLOW: # %bb.0:
+; AVX512VL-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
+; AVX512VL-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
+; AVX512VL-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,0,2,3]
+; AVX512VL-SLOW-NEXT: retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v8i16_4563XXXX:
+; AVX512VL-FAST: # %bb.0:
+; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,13,6,7,4,5,6,7,8,9,10,11,0,1,2,3]
+; AVX512VL-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,0,2,3]
+; AVX512VL-FAST-NEXT: retq
 %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
 ret <8 x i16> %shuffle
 }
@@ -804,12 +924,18 @@ define <8 x i16> @shuffle_v8i16_01274563(<8 x i16> %a, <8 x i16> %b) {
 ; AVX1OR2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,14,15,8,9,10,11,12,13,6,7]
 ; AVX1OR2-NEXT: retq
 ;
-; AVX512VL-LABEL: shuffle_v8i16_01274563:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,1,2]
-; AVX512VL-NEXT: retq
+; AVX512VL-SLOW-LABEL: shuffle_v8i16_01274563:
+; AVX512VL-SLOW: # %bb.0:
+; AVX512VL-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX512VL-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
+; AVX512VL-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,1,2]
+; AVX512VL-SLOW-NEXT: retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v8i16_01274563:
+; AVX512VL-FAST: # %bb.0:
+; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,8,9,10,11,12,13,6,7,4,5,14,15]
+; AVX512VL-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,1,2]
+; AVX512VL-FAST-NEXT: retq
 %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 7, i32 4, i32 5, i32 6, i32 3>
 ret <8 x i16> %shuffle
 }
@@ -837,12 +963,18 @@ define <8 x i16> @shuffle_v8i16_45630127(<8 x i16> %a, <8 x i16> %b) {
 ; AVX1OR2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,6,7,0,1,2,3,4,5,14,15]
 ; AVX1OR2-NEXT: retq
 ;
-; AVX512VL-LABEL: shuffle_v8i16_45630127:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
-; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,0,3,1]
-; AVX512VL-NEXT: retq
+; AVX512VL-SLOW-LABEL: shuffle_v8i16_45630127:
+; AVX512VL-SLOW: # %bb.0:
+; AVX512VL-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
+; AVX512VL-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,3,2,1,4,5,6,7]
+; AVX512VL-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,0,3,1]
+; AVX512VL-SLOW-NEXT: retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v8i16_45630127:
+; AVX512VL-FAST: # %bb.0:
+; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,13,6,7,4,5,14,15,8,9,10,11,0,1,2,3]
+; AVX512VL-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,0,3,1]
+; AVX512VL-FAST-NEXT: retq
 %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 3, i32 0, i32 1, i32 2, i32 7>
 ret <8 x i16> %shuffle
 }
@@ -980,12 +1112,38 @@ define <8 x i16> @shuffle_v8i16_109832ba(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,7,5]
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: shuffle_v8i16_109832ba:
-; AVX: # %bb.0:
-; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,0,3,1,4,5,6,7]
-; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,7,5]
-; AVX-NEXT: retq
+; AVX1-LABEL: shuffle_v8i16_109832ba:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,0,3,1,4,5,6,7]
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,7,5]
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: shuffle_v8i16_109832ba:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,0,3,1,4,5,6,7]
+; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,7,5]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: shuffle_v8i16_109832ba:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,0,1,6,7,2,3,12,13,8,9,14,15,10,11]
+; AVX2-FAST-NEXT: retq
+;
+; AVX512VL-SLOW-LABEL: shuffle_v8i16_109832ba:
+; AVX512VL-SLOW: # %bb.0:
+; AVX512VL-SLOW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX512VL-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,0,3,1,4,5,6,7]
+; AVX512VL-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,7,5]
+; AVX512VL-SLOW-NEXT: retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v8i16_109832ba:
+; AVX512VL-FAST: # %bb.0:
+; AVX512VL-FAST-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,0,1,6,7,2,3,12,13,8,9,14,15,10,11]
+; AVX512VL-FAST-NEXT: retq
 %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 0, i32 9, i32 8, i32 3, i32 2, i32 11, i32 10>
 ret <8 x i16> %shuffle
 }
@@ -1028,13 +1186,43 @@ define <8 x i16> @shuffle_v8i16_0213cedf(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: shuffle_v8i16_0213cedf:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,1,3,4,5,6,7]
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX-NEXT: retq
+; AVX1-LABEL: shuffle_v8i16_0213cedf:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,1,3,4,5,6,7]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: shuffle_v8i16_0213cedf:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,1,3,4,5,6,7]
+; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: shuffle_v8i16_0213cedf:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[8,9,12,13,10,11,14,15,8,9,10,11,12,13,14,15]
+; AVX2-FAST-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-FAST-NEXT: retq
+;
+; AVX512VL-SLOW-LABEL: shuffle_v8i16_0213cedf:
+; AVX512VL-SLOW: # %bb.0:
+; AVX512VL-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; AVX512VL-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX512VL-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,1,3,4,5,6,7]
+; AVX512VL-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512VL-SLOW-NEXT: retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v8i16_0213cedf:
+; AVX512VL-FAST: # %bb.0:
+; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[8,9,12,13,10,11,14,15,8,9,10,11,12,13,14,15]
+; AVX512VL-FAST-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
+; AVX512VL-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512VL-FAST-NEXT: retq
 %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 2, i32 1, i32 3, i32 12, i32 14, i32 13, i32 15>
 ret <8 x i16> %shuffle
 }
@@ -1064,12 +1252,38 @@ define <8 x i16> @shuffle_v8i16_443aXXXX(<8 x i16> %a, <8 x i16> %b) {
 ; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
 ; SSE41-NEXT: retq
 ;
-; AVX-LABEL: shuffle_v8i16_443aXXXX:
-; AVX: # %bb.0:
-; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4,5,6,7]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
-; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
-; AVX-NEXT: retq
+; AVX1-LABEL: shuffle_v8i16_443aXXXX:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: shuffle_v8i16_443aXXXX:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4,5,6,7]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
+; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: shuffle_v8i16_443aXXXX:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4,5,6,7]
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,6,7,4,5,8,9,10,11,12,13,14,15]
+; AVX2-FAST-NEXT: retq
+;
+; AVX512VL-SLOW-LABEL: shuffle_v8i16_443aXXXX:
+; AVX512VL-SLOW: # %bb.0:
+; AVX512VL-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4,5,6,7]
+; AVX512VL-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
+; AVX512VL-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,3,2,4,5,6,7]
+; AVX512VL-SLOW-NEXT: retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v8i16_443aXXXX:
+; AVX512VL-FAST: # %bb.0:
+; AVX512VL-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4,5,6,7]
+; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,6,7,4,5,8,9,10,11,12,13,14,15]
+; AVX512VL-FAST-NEXT: retq
 %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 4, i32 3, i32 10, i32 undef, i32 undef, i32 undef, i32 undef>
 ret <8 x i16> %shuffle
 }
@@ -1336,13 +1550,35 @@ define <8 x i16> @shuffle_v8i16_XXX1X579(<8 x i16> %a, <8 x i16> %b) {
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7]
 ; AVX1-NEXT: retq
 ;
-; AVX2OR512VL-LABEL: shuffle_v8i16_XXX1X579:
-; AVX2OR512VL: # %bb.0:
-; AVX2OR512VL-NEXT: vpbroadcastd %xmm1, %xmm1
-; AVX2OR512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
-; AVX2OR512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
-; AVX2OR512VL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7]
-; AVX2OR512VL-NEXT: retq
+; AVX2-SLOW-LABEL: shuffle_v8i16_XXX1X579:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpbroadcastd %xmm1, %xmm1
+; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
+; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
+; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: shuffle_v8i16_XXX1X579:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpbroadcastd %xmm1, %xmm1
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,2,3,8,9,10,11,14,15,14,15]
+; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7]
+; AVX2-FAST-NEXT: retq
+;
+; AVX512VL-SLOW-LABEL: shuffle_v8i16_XXX1X579:
+; AVX512VL-SLOW: # %bb.0:
+; AVX512VL-SLOW-NEXT: vpbroadcastd %xmm1, %xmm1
+; AVX512VL-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,1,4,5,6,7]
+; AVX512VL-SLOW-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
+; AVX512VL-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7]
+; AVX512VL-SLOW-NEXT: retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v8i16_XXX1X579:
+; AVX512VL-FAST: # %bb.0:
+; AVX512VL-FAST-NEXT: vpbroadcastd %xmm1, %xmm1
+; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,2,3,8,9,10,11,14,15,14,15]
+; AVX512VL-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7]
+; AVX512VL-FAST-NEXT: retq
 %shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 1, i32 undef, i32 5, i32 7, i32 9>
 ret <8 x i16> %shuffle
 }
File diff suppressed because it is too large
@@ -1,7 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX1OR2 --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX1OR2 --check-prefix=AVX2OR512VL --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX2OR512VL --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX1OR2 --check-prefix=AVX2OR512VL --check-prefix=AVX2 --check-prefix=AVX2-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefix=ALL --check-prefix=AVX1OR2 --check-prefix=AVX2OR512VL --check-prefix=AVX2 --check-prefix=AVX2-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX2OR512VL --check-prefix=AVX512VL --check-prefix=AVX512VL-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw,+fast-variable-shuffle | FileCheck %s --check-prefix=ALL --check-prefix=AVX2OR512VL --check-prefix=AVX512VL --check-prefix=AVX512VL-FAST
 
 define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<32 x i8> %a, <32 x i8> %b) {
 ; AVX1-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
@@ -431,12 +433,33 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_20_00_00_00_00_
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
-; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_20_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # %bb.0:
-; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2OR512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
-; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2OR512VL-NEXT: retq
+; AVX2-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_20_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
+; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_20_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,5,6,7,0,1,2,3]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX2-FAST-NEXT: retq
+;
+; AVX512VL-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_20_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX512VL-SLOW: # %bb.0:
+; AVX512VL-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
+; AVX512VL-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; AVX512VL-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-SLOW-NEXT: retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_00_20_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX512VL-FAST: # %bb.0:
+; AVX512VL-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,5,6,7,0,1,2,3]
+; AVX512VL-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-FAST-NEXT: retq
 %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 20, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
 ret <32 x i8> %shuffle
 }
@@ -452,12 +475,33 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_21_00_00_00_00_00_
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
-; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_21_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # %bb.0:
-; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2OR512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
-; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2OR512VL-NEXT: retq
+; AVX2-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_21_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
+; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_21_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,5,6,7,0,1,2,3]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX2-FAST-NEXT: retq
+;
+; AVX512VL-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_21_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX512VL-SLOW: # %bb.0:
+; AVX512VL-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
+; AVX512VL-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; AVX512VL-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-SLOW-NEXT: retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_00_21_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX512VL-FAST: # %bb.0:
+; AVX512VL-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,5,6,7,0,1,2,3]
+; AVX512VL-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-FAST-NEXT: retq
 %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 21, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
 ret <32 x i8> %shuffle
 }
@@ -473,12 +517,33 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_00_22_00_00_00_00_00_00_
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
-; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_22_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # %bb.0:
-; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2OR512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
-; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2OR512VL-NEXT: retq
+; AVX2-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_22_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
+; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_22_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,5,6,7,0,1,2,3]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX2-FAST-NEXT: retq
+;
+; AVX512VL-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_22_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX512VL-SLOW: # %bb.0:
+; AVX512VL-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
+; AVX512VL-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; AVX512VL-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-SLOW-NEXT: retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_00_22_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX512VL-FAST: # %bb.0:
+; AVX512VL-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,5,6,7,0,1,2,3]
+; AVX512VL-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-FAST-NEXT: retq
 %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 22, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
 ret <32 x i8> %shuffle
 }
@@ -494,12 +559,33 @@ define <32 x i8> @shuffle_v32i8_00_00_00_00_00_00_00_00_23_00_00_00_00_00_00_00_
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
-; AVX2OR512VL-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_23_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
-; AVX2OR512VL: # %bb.0:
-; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX2OR512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
-; AVX2OR512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX2OR512VL-NEXT: retq
+; AVX2-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_23_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
+; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_23_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,5,6,7,0,1,2,3]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX2-FAST-NEXT: retq
+;
+; AVX512VL-SLOW-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_23_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX512VL-SLOW: # %bb.0:
+; AVX512VL-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
+; AVX512VL-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; AVX512VL-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-SLOW-NEXT: retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v32i8_00_00_00_00_00_00_00_00_23_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
+; AVX512VL-FAST: # %bb.0:
+; AVX512VL-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,5,6,7,0,1,2,3]
+; AVX512VL-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512VL-FAST-NEXT: retq
 %shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 23, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
 ret <32 x i8> %shuffle
 }
@ -1092,15 +1178,24 @@ define <32 x i8> @shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_16_48_16_48_16_48_16_48_16_48_16_48_16_48_16_48:
; AVX2-SLOW-LABEL: shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_16_48_16_48_16_48_16_48_16_48_16_48_16_48_16_48:
; AVX2: # %bb.0:
; AVX2-SLOW: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-SLOW-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-SLOW-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_16_48_16_48_16_48_16_48_16_48_16_48_16_48_16_48:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,16,17,16,17,16,17,16,17,16,17,16,17,16,17,16,17]
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; AVX2-FAST-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_00_32_00_32_00_32_00_32_00_32_00_32_00_32_00_32_16_48_16_48_16_48_16_48_16_48_16_48_16_48_16_48:
; AVX512VL: # %bb.0:
@ -2210,14 +2305,21 @@ define <32 x i8> @shuffle_v32i8_15_15_15_15_15_15_15_15_32_32_32_32_32_32_32_32_
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v32i8_15_15_15_15_15_15_15_15_32_32_32_32_32_32_32_32_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
; AVX512VL-SLOW-LABEL: shuffle_v32i8_15_15_15_15_15_15_15_15_32_32_32_32_32_32_32_32_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
; AVX512VL: # %bb.0:
; AVX512VL-SLOW: # %bb.0:
; AVX512VL-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512VL-SLOW-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX512VL-SLOW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; AVX512VL-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7]
; AVX512VL-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7]
; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512VL-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512VL-NEXT: retq
; AVX512VL-SLOW-NEXT: retq
;
; AVX512VL-FAST-LABEL: shuffle_v32i8_15_15_15_15_15_15_15_15_32_32_32_32_32_32_32_32_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu_uu:
; AVX512VL-FAST: # %bb.0:
; AVX512VL-FAST-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,15,15,15,15,15,15,15,12,12,13,13,14,14,15,15]
; AVX512VL-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512VL-FAST-NEXT: retq
%shuffle = shufflevector <32 x i8> %a, <32 x i8> %b, <32 x i32> <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
ret <32 x i8> %shuffle
}
@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX2 --check-prefix=AVX2-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefix=ALL --check-prefix=AVX2 --check-prefix=AVX2-FAST
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL --check-prefix=AVX512VL-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL --check-prefix=AVX512VL-FAST

define <4 x double> @shuffle_v4f64_0000(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: shuffle_v4f64_0000:
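For reference, the SLOW/FAST split introduced by these RUN lines comes entirely from the new +fast-variable-shuffle attribute; the two codegen variants of any single test can also be compared directly with plain llc invocations. This is only a sketch: the file name repro.ll and the function name @repro_0z3z are placeholders, and it assumes an llc built with this patch.

  llc < repro.ll -mtriple=x86_64-unknown-unknown -mattr=+avx2
  llc < repro.ll -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle

  ; repro.ll - a single case lifted from the tests below
  define <4 x double> @repro_0z3z(<4 x double> %a) {
    %s = shufflevector <4 x double> %a, <4 x double> <double 0.000000e+00, double undef, double undef, double undef>, <4 x i32> <i32 0, i32 4, i32 3, i32 4>
    ret <4 x double> %s
  }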
@ -546,19 +548,29 @@ define <4 x double> @shuffle_v4f64_0z3z(<4 x double> %a, <4 x double> %b) {
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_0z3z:
; AVX2-SLOW-LABEL: shuffle_v4f64_0z3z:
; AVX2: # %bb.0:
; AVX2-SLOW: # %bb.0:
; AVX2-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,3,2]
; AVX2-SLOW-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,3,2]
; AVX2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX2-SLOW-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; AVX2-SLOW-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; AVX2-NEXT: retq
; AVX2-SLOW-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_0z3z:
; AVX2-FAST-LABEL: shuffle_v4f64_0z3z:
; AVX512VL: # %bb.0:
; AVX2-FAST: # %bb.0:
; AVX512VL-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,3,2]
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-FAST-NEXT: retq
; AVX512VL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
;
; AVX512VL-NEXT: retq
; AVX512VL-SLOW-LABEL: shuffle_v4f64_0z3z:
; AVX512VL-SLOW: # %bb.0:
; AVX512VL-SLOW-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[0,0,3,2]
; AVX512VL-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-SLOW-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; AVX512VL-SLOW-NEXT: retq
;
; AVX512VL-FAST-LABEL: shuffle_v4f64_0z3z:
; AVX512VL-FAST: # %bb.0:
; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero,zero
; AVX512VL-FAST-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> <double 0.000000e+00, double undef, double undef, double undef>, <4 x i32> <i32 0, i32 4, i32 3, i32 4>
ret <4 x double> %shuffle
}
@ -574,19 +586,29 @@ define <4 x double> @shuffle_v4f64_1z2z(<4 x double> %a, <4 x double> %b) {
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4f64_1z2z:
; AVX2-SLOW-LABEL: shuffle_v4f64_1z2z:
; AVX2: # %bb.0:
; AVX2-SLOW: # %bb.0:
; AVX2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX2-SLOW-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
; AVX2-SLOW-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,2,0]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,2,0]
; AVX2-NEXT: retq
; AVX2-SLOW-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f64_1z2z:
; AVX2-FAST-LABEL: shuffle_v4f64_1z2z:
; AVX512VL: # %bb.0:
; AVX2-FAST: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21,22,23],zero,zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
; AVX2-FAST-NEXT: retq
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,2,0]
;
; AVX512VL-NEXT: retq
; AVX512VL-SLOW-LABEL: shuffle_v4f64_1z2z:
; AVX512VL-SLOW: # %bb.0:
; AVX512VL-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-SLOW-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
; AVX512VL-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,2,0]
; AVX512VL-SLOW-NEXT: retq
;
; AVX512VL-FAST-LABEL: shuffle_v4f64_1z2z:
; AVX512VL-FAST: # %bb.0:
; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21,22,23],zero,zero,zero,zero,zero,zero,zero,zero
; AVX512VL-FAST-NEXT: retq
%1 = shufflevector <4 x double> %a, <4 x double> <double 0.000000e+00, double undef, double undef, double undef>, <4 x i32> <i32 1, i32 4, i32 2, i32 4>
ret <4 x double> %1
}
@ -980,11 +1002,17 @@ define <4 x i64> @shuffle_v4i64_2u35(<4 x i64> %a, <4 x i64> %b) {
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,1]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_2u35:
; AVX512VL-SLOW-LABEL: shuffle_v4i64_2u35:
; AVX512VL: # %bb.0:
; AVX512VL-SLOW: # %bb.0:
; AVX512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX512VL-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,1]
; AVX512VL-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,1]
; AVX512VL-NEXT: retq
; AVX512VL-SLOW-NEXT: retq
;
; AVX512VL-FAST-LABEL: shuffle_v4i64_2u35:
; AVX512VL-FAST: # %bb.0:
; AVX512VL-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [2,5,3,5]
; AVX512VL-FAST-NEXT: vpermt2q %ymm1, %ymm2, %ymm0
; AVX512VL-FAST-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 2, i32 undef, i32 3, i32 5>
ret <4 x i64> %shuffle
}
@ -1564,12 +1592,17 @@ define <4 x i64> @shuffle_v4i64_z0z3(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_z0z3:
; AVX2-SLOW-LABEL: shuffle_v4i64_z0z3:
; AVX2: # %bb.0:
; AVX2-SLOW: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,3]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,3]
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-SLOW-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: retq
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: shuffle_v4i64_z0z3:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[24,25,26,27,28,29,30,31]
; AVX2-FAST-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_z0z3:
; AVX512VL: # %bb.0:
@ -1592,19 +1625,29 @@ define <4 x i64> @shuffle_v4i64_1z2z(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_1z2z:
; AVX2-SLOW-LABEL: shuffle_v4i64_1z2z:
; AVX2: # %bb.0:
; AVX2-SLOW: # %bb.0:
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-SLOW-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,2,0]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,0,2,0]
; AVX2-NEXT: retq
; AVX2-SLOW-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i64_1z2z:
; AVX2-FAST-LABEL: shuffle_v4i64_1z2z:
; AVX512VL: # %bb.0:
; AVX2-FAST: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21,22,23],zero,zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX2-FAST-NEXT: retq
; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,0,2,0]
;
; AVX512VL-NEXT: retq
; AVX512VL-SLOW-LABEL: shuffle_v4i64_1z2z:
; AVX512VL-SLOW: # %bb.0:
; AVX512VL-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX512VL-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,0,2,0]
; AVX512VL-SLOW-NEXT: retq
;
; AVX512VL-FAST-LABEL: shuffle_v4i64_1z2z:
; AVX512VL-FAST: # %bb.0:
; AVX512VL-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21,22,23],zero,zero,zero,zero,zero,zero,zero,zero
; AVX512VL-FAST-NEXT: retq
%1 = shufflevector <4 x i64> %a, <4 x i64> <i64 0, i64 undef, i64 undef, i64 undef>, <4 x i32> <i32 1, i32 4, i32 2, i32 4>
ret <4 x i64> %1
}
@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX1OR2 --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX1OR2 --check-prefix=AVX2OR512VL --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX1OR2 --check-prefix=AVX2OR512VL --check-prefix=AVX2 --check-prefix=AVX2-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX2OR512VL --check-prefix=AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefix=ALL --check-prefix=AVX1OR2 --check-prefix=AVX2OR512VL --check-prefix=AVX2 --check-prefix=AVX2-FAST
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX2OR512VL --check-prefix=AVX512VL --check-prefix=AVX512VL-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512dq,+fast-variable-shuffle | FileCheck %s --check-prefix=ALL --check-prefix=AVX2OR512VL --check-prefix=AVX512VL --check-prefix=AVX512VL-FAST

define <8 x float> @shuffle_v8f32_00000000(<8 x float> %a, <8 x float> %b) {
; AVX1-LABEL: shuffle_v8f32_00000000:
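As the NOTE line above says, the CHECK blocks in these files are autogenerated rather than written by hand. After adding RUN lines like the ones above, the expected-output blocks can be refreshed by rerunning the update script against the test file, roughly as follows (the file path is inferred from the test contents, assumes the usual in-tree layout, and expects a freshly built llc on PATH):

  utils/update_llc_test_checks.py test/CodeGen/X86/vector-shuffle-256-v8.ll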
@ -342,12 +344,26 @@ define <8 x float> @shuffle_v8f32_09ab1def(<8 x float> %a, <8 x float> %b) {
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_09ab1def:
; AVX2-LABEL: shuffle_v8f32_09ab1def:
; AVX2OR512VL: # %bb.0:
; AVX2: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
; AVX2OR512VL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX2OR512VL-NEXT: retq
; AVX2-NEXT: retq
;
; AVX512VL-SLOW-LABEL: shuffle_v8f32_09ab1def:
; AVX512VL-SLOW: # %bb.0:
; AVX512VL-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
; AVX512VL-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
; AVX512VL-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX512VL-SLOW-NEXT: retq
;
; AVX512VL-FAST-LABEL: shuffle_v8f32_09ab1def:
; AVX512VL-FAST: # %bb.0:
; AVX512VL-FAST-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[0,1,1,3]
; AVX512VL-FAST-NEXT: vmovaps {{.*#+}} ymm0 = [8,1,2,3,10,5,6,7]
; AVX512VL-FAST-NEXT: vpermi2ps %ymm2, %ymm1, %ymm0
; AVX512VL-FAST-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 1, i32 13, i32 14, i32 15>
ret <8 x float> %shuffle
}
@ -651,14 +667,23 @@ define <8 x float> @shuffle_v8f32_c348cda0(<8 x float> %a, <8 x float> %b) {
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3,4,5,6],ymm0[7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_c348cda0:
; AVX2-SLOW-LABEL: shuffle_v8f32_c348cda0:
; AVX2: # %bb.0:
; AVX2-SLOW: # %bb.0:
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <4,u,u,0,4,5,2,u>
; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm2 = <4,u,u,0,4,5,2,u>
; AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm1
; AVX2-SLOW-NEXT: vpermps %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,3,2,0,4,7,6,4]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,3,2,0,4,7,6,4]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,1]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3,4,5,6],ymm0[7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3,4,5,6],ymm0[7]
; AVX2-NEXT: retq
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: shuffle_v8f32_c348cda0:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm2 = [0,3,4,7,4,7,2,0]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm0
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm2 = <4,u,u,0,4,5,2,u>
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm2, %ymm1
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3,4,5,6],ymm0[7]
; AVX2-FAST-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8f32_c348cda0:
; AVX512VL: # %bb.0:
@ -681,14 +706,23 @@ define <8 x float> @shuffle_v8f32_f511235a(<8 x float> %a, <8 x float> %b) {
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8f32_f511235a:
; AVX2-SLOW-LABEL: shuffle_v8f32_f511235a:
; AVX2: # %bb.0:
; AVX2-SLOW: # %bb.0:
; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[3,2,2,3,7,6,6,7]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[3,2,2,3,7,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,0]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,0]
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,2,3,5,5,6,7]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,2,3,5,5,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,0,1,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,0,1,2]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
; AVX2-NEXT: retq
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: shuffle_v8f32_f511235a:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm2 = [7,6,2,3,7,6,3,2]
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm2, %ymm1
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm2 = [5,5,1,1,2,3,5,5]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm0
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
; AVX2-FAST-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8f32_f511235a:
; AVX512VL: # %bb.0:
@ -722,11 +756,23 @@ define <8 x float> @shuffle_v8f32_76547654(<8 x float> %a, <8 x float> %b) {
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_76547654:
; AVX2-SLOW-LABEL: shuffle_v8f32_76547654:
; AVX2OR512VL: # %bb.0:
; AVX2-SLOW: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2OR512VL-NEXT: retq
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: shuffle_v8f32_76547654:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [7,6,5,4,7,6,5,4]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8f32_76547654:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 7, i32 6, i32 5, i32 4>
ret <8 x float> %shuffle
}
@ -738,11 +784,23 @@ define <8 x float> @shuffle_v8f32_76543210(<8 x float> %a, <8 x float> %b) {
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8f32_76543210:
; AVX2-SLOW-LABEL: shuffle_v8f32_76543210:
; AVX2OR512VL: # %bb.0:
; AVX2-SLOW: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX2OR512VL-NEXT: retq
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: shuffle_v8f32_76543210:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [7,6,5,4,3,2,1,0]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8f32_76543210:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret <8 x float> %shuffle
}
@ -1264,12 +1322,26 @@ define <8 x i32> @shuffle_v8i32_09ab1def(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_09ab1def:
; AVX2-LABEL: shuffle_v8i32_09ab1def:
; AVX2OR512VL: # %bb.0:
; AVX2: # %bb.0:
; AVX2OR512VL-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX2OR512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
; AVX2OR512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX2OR512VL-NEXT: retq
; AVX2-NEXT: retq
;
; AVX512VL-SLOW-LABEL: shuffle_v8i32_09ab1def:
; AVX512VL-SLOW: # %bb.0:
; AVX512VL-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512VL-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
; AVX512VL-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX512VL-SLOW-NEXT: retq
;
; AVX512VL-FAST-LABEL: shuffle_v8i32_09ab1def:
; AVX512VL-FAST: # %bb.0:
; AVX512VL-FAST-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; AVX512VL-FAST-NEXT: vmovdqa {{.*#+}} ymm0 = [8,1,2,3,10,5,6,7]
; AVX512VL-FAST-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
; AVX512VL-FAST-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 1, i32 13, i32 14, i32 15>
ret <8 x i32> %shuffle
}
@ -1696,13 +1768,21 @@ define <8 x i32> @shuffle_v8i32_6caa87e5(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6],ymm0[7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_6caa87e5:
; AVX2-SLOW-LABEL: shuffle_v8i32_6caa87e5:
; AVX2: # %bb.0:
; AVX2-SLOW: # %bb.0:
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,1,3,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,1,3,2]
; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,2,2,4,4,6,6]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,2,2,4,4,6,6]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,0,3]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,0,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6],ymm0[7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6],ymm0[7]
; AVX2-NEXT: retq
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: shuffle_v8i32_6caa87e5:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm2 = [4,4,2,2,0,0,6,6]
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm2, %ymm1
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,1,3,2]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6],ymm0[7]
; AVX2-FAST-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i32_6caa87e5:
; AVX512VL: # %bb.0:
@ -1737,11 +1817,23 @@ define <8 x i32> @shuffle_v8i32_76547654(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_76547654:
; AVX2-SLOW-LABEL: shuffle_v8i32_76547654:
; AVX2OR512VL: # %bb.0:
; AVX2-SLOW: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2OR512VL-NEXT: retq
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: shuffle_v8i32_76547654:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [7,6,5,4,7,6,5,4]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i32_76547654:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 7, i32 6, i32 5, i32 4>
ret <8 x i32> %shuffle
}
@ -1753,11 +1845,23 @@ define <8 x i32> @shuffle_v8i32_76543210(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX1-NEXT: retq
;
; AVX2OR512VL-LABEL: shuffle_v8i32_76543210:
; AVX2-SLOW-LABEL: shuffle_v8i32_76543210:
; AVX2OR512VL: # %bb.0:
; AVX2-SLOW: # %bb.0:
; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2OR512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX2OR512VL-NEXT: retq
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: shuffle_v8i32_76543210:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [7,6,5,4,3,2,1,0]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v8i32_76543210:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX512VL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX512VL-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret <8 x i32> %shuffle
}
@ -186,8 +186,7 @@ define <32 x i16> @shuffle_v32i16_1_1_0_0_5_5_4_4_9_9_11_11_13_13_12_12_17_17_19
;
; SKX-LABEL: shuffle_v32i16_1_1_0_0_5_5_4_4_9_9_11_11_13_13_12_12_17_17_19_19_21_21_20_20_25_25_27_27_29_29_28_28:
; SKX: ## %bb.0:
; SKX-NEXT: vpshuflw {{.*#+}} zmm0 = zmm0[1,1,0,0,4,5,6,7,9,9,8,8,12,13,14,15,17,17,16,16,20,21,22,23,25,25,24,24,28,29,30,31]
; SKX-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[2,3,2,3,0,1,0,1,10,11,10,11,8,9,8,9,18,19,18,19,16,17,16,17,26,27,26,27,24,25,24,25,34,35,34,35,32,33,32,33,42,43,42,43,40,41,40,41,50,51,50,51,48,49,48,49,58,59,58,59,56,57,56,57]
; SKX-NEXT: vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,5,5,4,4,8,9,10,11,13,13,12,12,16,17,18,19,21,21,20,20,24,25,26,27,29,29,28,28]
; SKX-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> zeroinitializer, <32 x i32> <i32 1, i32 1, i32 0, i32 0, i32 5, i32 5, i32 4, i32 4, i32 9, i32 9, i32 8, i32 8, i32 13, i32 13, i32 12, i32 12, i32 17, i32 17, i32 16, i32 16, i32 21, i32 21, i32 20, i32 20, i32 25, i32 25, i32 24, i32 24, i32 29, i32 29, i32 28, i32 28>
ret <32 x i16> %c