[X86] combineX86ShufflesConstants - constant fold from target shuffles unless optsize = true
Currently we only constant fold target shuffles if any of the sources has one use, or it would remove a variable shuffle mask - the aim being to avoid constant pool bloat.

This patch proposes we should constant fold by default and only limit this if optsize is enabled. I've added a basic test for this in vector-mul.ll (the pmuludq case is by far the most common); I can add other specific test cases if people need them.

This should permit further constant folding, break some instruction dependencies and help reduce shuffle port pressure.

Differential Revision: https://reviews.llvm.org/D113748
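For readers skimming the diff below, here is a standalone sketch of the new gating decision. This is a toy model, not LLVM code; the flag names simply mirror the variables visible in the X86ISelLowering.cpp hunk:

```cpp
// Toy model of the new fold gating (illustrative; not the LLVM implementation).
// Before the patch, the fold was refused whenever no constant operand was
// single-use and no variable mask shuffle was involved. After the patch,
// that refusal only applies when optimizing for size.
#include <cstdio>

static bool shouldConstantFold(bool OptForSize, bool OneUseConstantOp,
                               bool HasVariableMask) {
  if (OptForSize && !OneUseConstantOp && !HasVariableMask)
    return false; // avoid constant pool bloat at -Os/-Oz
  return true;    // otherwise fold by default
}

int main() {
  std::printf("%d\n", shouldConstantFold(false, false, false)); // 1: new default behavior
  std::printf("%d\n", shouldConstantFold(true, false, false));  // 0: old heuristic, kept for -Os
}
```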
This commit is contained in:
parent ebda5e1e52
commit 59087dce3b
@@ -37759,10 +37759,11 @@ static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
       return SDValue();
   }
 
-  // Only fold if at least one of the constants is only used once or
-  // the combined shuffle has included a variable mask shuffle, this
-  // is to avoid constant pool bloat.
-  if (!OneUseConstantOp && !HasVariableMask)
+  // If we're optimizing for size, only fold if at least one of the constants is
+  // only used once or the combined shuffle has included a variable mask
+  // shuffle, this is to avoid constant pool bloat.
+  bool IsOptimizingSize = DAG.shouldOptForSize();
+  if (IsOptimizingSize && !OneUseConstantOp && !HasVariableMask)
     return SDValue();
 
   // Shuffle the constant bits according to the mask.
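The code after this gate (the "Shuffle the constant bits according to the mask" context line) performs the actual fold. A minimal constexpr sketch of that folding step, with hypothetical values; the real code operates on APInt bit vectors rather than plain integers:

```cpp
// Minimal sketch of folding a shuffle of a constant vector at compile time
// (illustrative only): a shuffle whose mask and inputs are all constants
// simply produces another constant, which can live in the constant pool.
#include <array>
#include <cstdint>
#include <cstdio>

template <std::size_t N>
constexpr std::array<std::uint32_t, N>
foldShuffle(const std::array<std::uint32_t, N> &Src,
            const std::array<int, N> &Mask) {
  std::array<std::uint32_t, N> Out{};
  for (std::size_t I = 0; I != N; ++I)
    Out[I] = Mask[I] < 0 ? 0 : Src[static_cast<std::size_t>(Mask[I])]; // negative = undef lane
  return Out;
}

int main() {
  // e.g. pshufd $0xF5 (mask [1,1,3,3]) applied to a constant vector:
  constexpr std::array<std::uint32_t, 4> C{11, 22, 33, 44};
  constexpr std::array<int, 4> M{1, 1, 3, 3};
  constexpr auto Folded = foldShuffle(C, M); // {22, 22, 44, 44}
  for (std::uint32_t V : Folded)
    std::printf("%u ", V);
  std::printf("\n");
}
```

This is why the test diffs below trade a `movdqa` of a constant plus a `pshufd` for a second `pmuludq` that reads its (pre-shuffled) constant straight from a `{{\.?LCPI[0-9]+_[0-9]+}}` pool entry.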
@@ -48,11 +48,11 @@ define void @concat_of_broadcast_v2f64_v4f64() {
; AVX1-NEXT: movl $1091567616, 30256(%rax) # imm = 0x41100000
; AVX1-NEXT: movabsq $4294967297, %rcx # imm = 0x100000001
; AVX1-NEXT: movq %rcx, 46348(%rax)
; AVX1-NEXT: vmovaps {{.*#+}} xmm0 = <1.0E+0,1.0E+0,u,u>
; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-NEXT: vmovups %ymm1, 48296(%rax)
; AVX1-NEXT: vmovlps %xmm0, 47372(%rax)
; AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = [7.812501848093234E-3,7.812501848093234E-3,7.812501848093234E-3,7.812501848093234E-3]
; AVX1-NEXT: # ymm0 = mem[0,1,0,1]
; AVX1-NEXT: vmovups %ymm0, 48296(%rax)
; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovsd %xmm0, 47372(%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -141,12 +141,10 @@ define void @testCombineMultiplies_non_splat(<4 x i32> %v1) nounwind {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [11,22,33,44]
; CHECK-NEXT: paddd %xmm0, %xmm1
; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [22,33,44,55]
; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; CHECK-NEXT: pmuludq %xmm2, %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; CHECK-NEXT: pmuludq %xmm3, %xmm2
; CHECK-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [242,726,1452,2420]
@@ -202,17 +202,15 @@ define i32 @PR43159(<4 x i32>* %a0) {
; SSE-LABEL: PR43159:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld $1, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5],xmm0[6,7]
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
; SSE-NEXT: psubd %xmm3, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $1, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE-NEXT: psubd %xmm2, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: pxor %xmm2, %xmm2
@@ -230,14 +228,12 @@ define i32 @PR43159(<4 x i32>* %a0) {
; AVX1-LABEL: PR43159:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsrld $1, %xmm0, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm0[0,1,2,3],xmm3[4,5],xmm0[6,7]
; AVX1-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpsubd %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
@@ -256,12 +252,10 @@ define i32 @PR43159(<4 x i32>* %a0) {
; AVX2-LABEL: PR43159:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
; AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm3
; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; AVX2-NEXT: vpmuludq %xmm2, %xmm4, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX2-NEXT: vpsubd %xmm2, %xmm0, %xmm0
@@ -281,12 +275,10 @@ define i32 @PR43159(<4 x i32>* %a0) {
; AVX512VL-LABEL: PR43159:
; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
; AVX512VL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512VL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm3
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; AVX512VL-NEXT: vpmuludq %xmm2, %xmm4, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
; AVX512VL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512VL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX512VL-NEXT: vpsubd %xmm2, %xmm0, %xmm0
@@ -306,12 +298,10 @@ define i32 @PR43159(<4 x i32>* %a0) {
; AVX512DQVL-LABEL: PR43159:
; AVX512DQVL: # %bb.0: # %entry
; AVX512DQVL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512DQVL-NEXT: vmovdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
; AVX512DQVL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512DQVL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm3
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; AVX512DQVL-NEXT: vpmuludq %xmm2, %xmm4, %xmm2
; AVX512DQVL-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
; AVX512DQVL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512DQVL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX512DQVL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX512DQVL-NEXT: vpsubd %xmm2, %xmm0, %xmm0
@@ -8,18 +8,16 @@
define <4 x i32> @combine_vec_rot_rot(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_rot_rot:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [524288,131072,32768,8192]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; XOP-LABEL: combine_vec_rot_rot:
@@ -84,13 +84,13 @@ define <4 x i32> @combine_vec_shl_known_zero1(<4 x i32> %x) {
; SSE2: # %bb.0:
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65536,32768,16384,8192]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_known_zero1:
@@ -188,12 +188,10 @@ define <4 x i32> @combine_vec_shl_shl0(<4 x i32> %x) {
define <4 x i32> @combine_vec_shl_shl1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_shl1:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [16,64,256,1024]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
@@ -296,23 +294,19 @@ define <8 x i32> @combine_vec_shl_ext_shl2(<8 x i16> %x) {
; SSE2: # %bb.0:
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [131072,524288,2097152,8388608]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [33554432,134217728,536870912,2147483648]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
@@ -432,13 +426,11 @@ define <4 x i32> @combine_vec_shl_ge_ashr_extact1(<4 x i32> %x) {
; SSE2-NEXT: psrad $8, %xmm1
; SSE2-NEXT: psrad $4, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[3,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [32,64,128,256]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm2
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_ge_ashr_extact1:
@@ -511,13 +503,11 @@ define <4 x i32> @combine_vec_shl_lt_ashr_extact1(<4 x i32> %x) {
; SSE2-NEXT: psrad $8, %xmm1
; SSE2-NEXT: psrad $6, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[3,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [8,16,32,256]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm2
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_lt_ashr_extact1:
@@ -576,13 +566,11 @@ define <4 x i32> @combine_vec_shl_gt_lshr1(<4 x i32> %x) {
; SSE2-NEXT: psrld $8, %xmm1
; SSE2-NEXT: psrld $4, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[3,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [32,64,128,256]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm2
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_gt_lshr1:
@@ -641,13 +629,11 @@ define <4 x i32> @combine_vec_shl_le_lshr1(<4 x i32> %x) {
; SSE2-NEXT: psrld $8, %xmm1
; SSE2-NEXT: psrld $6, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[3,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [8,16,32,256]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm2
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_le_lshr1:
@@ -729,12 +715,10 @@ define <4 x i32> @combine_vec_shl_add0(<4 x i32> %x) {
define <4 x i32> @combine_vec_shl_add1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_add1:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2,4,8,16]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -778,12 +762,10 @@ define <4 x i32> @combine_vec_shl_or0(<4 x i32> %x) {
define <4 x i32> @combine_vec_shl_or1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_or1:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2,4,8,16]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -836,12 +818,10 @@ define <4 x i32> @combine_vec_shl_mul0(<4 x i32> %x) {
define <4 x i32> @combine_vec_shl_mul1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_mul1:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [10,24,56,128]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
@@ -864,12 +844,10 @@ define <4 x i32> @combine_vec_shl_mul1(<4 x i32> %x) {
define <4 x i32> @combine_vec_add_shl_nonsplat(<4 x i32> %a0) {
; SSE2-LABEL: combine_vec_add_shl_nonsplat:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [4,8,16,32]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -897,14 +875,14 @@ define <4 x i32> @combine_vec_add_shl_and_nonsplat(<4 x i32> %a0) {
; SSE2: # %bb.0:
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [4,8,16,32]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_add_shl_and_nonsplat:
@@ -512,15 +512,13 @@ define <4 x i1> @vec_4xi32_nonsplat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-NEXT: pslld $23, %xmm1
; X86-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,1,16776960,2147483648]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm3, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X86-SSE2-NEXT: pand %xmm2, %xmm0
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pxor %xmm1, %xmm1
; X86-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; X86-SSE2-NEXT: retl
@@ -539,15 +537,13 @@ define <4 x i1> @vec_4xi32_nonsplat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X64-SSE2-NEXT: pslld $23, %xmm1
; X64-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X64-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,1,16776960,2147483648]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; X64-SSE2-NEXT: pmuludq %xmm1, %xmm2
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X64-SSE2-NEXT: pmuludq %xmm3, %xmm1
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X64-SSE2-NEXT: pand %xmm2, %xmm0
; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X64-SSE2-NEXT: pand %xmm1, %xmm0
; X64-SSE2-NEXT: pxor %xmm1, %xmm1
; X64-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; X64-SSE2-NEXT: retq
@@ -2056,13 +2056,11 @@ define <4 x i32> @pmaddwd_negative2(<8 x i16> %A) {
; SSE2: # %bb.0:
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT: psrad $16, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,4294934528,0,0]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: pmuludq %xmm4, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[0,2]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
; SSE2-NEXT: paddd %xmm2, %xmm1
@@ -131,23 +131,22 @@ define <4 x i1> @p5_vector_urem_by_const__nonsplat(<4 x i32> %x, <4 x i32> %y) {
; SSE2: # %bb.0:
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2863311531,3435973837,2863311531,954437177]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE4-LABEL: p5_vector_urem_by_const__nonsplat:
@@ -83,14 +83,12 @@ define <4 x i32> @test5(<4 x i32> %x) {
; X86-NEXT: movdqa %xmm0, %xmm1
; X86-NEXT: psrad $3, %xmm1
; X86-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3]
; X86-NEXT: movdqa {{.*#+}} xmm2 = [2863311531,2863311531,3264175145,3264175145]
; X86-NEXT: movaps %xmm1, %xmm0
; X86-NEXT: pmuludq %xmm2, %xmm0
; X86-NEXT: movdqa {{.*#+}} xmm0 = [2863311531,2863311531,3264175145,3264175145]
; X86-NEXT: pmuludq %xmm1, %xmm0
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; X86-NEXT: pmuludq %xmm1, %xmm2
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-NEXT: retl
;
@@ -110,14 +108,12 @@ define <4 x i32> @test6(<4 x i32> %x) {
; X86-NEXT: psrad $3, %xmm1
; X86-NEXT: psrad $1, %xmm0
; X86-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X86-NEXT: movdqa {{.*#+}} xmm2 = [2863311531,2863311531,3303820997,3303820997]
; X86-NEXT: movapd %xmm0, %xmm1
; X86-NEXT: pmuludq %xmm2, %xmm1
; X86-NEXT: movdqa {{.*#+}} xmm1 = [2863311531,2863311531,3303820997,3303820997]
; X86-NEXT: pmuludq %xmm0, %xmm1
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; X86-NEXT: pmuludq %xmm0, %xmm2
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-NEXT: movdqa %xmm1, %xmm0
; X86-NEXT: retl
@@ -134,12 +130,10 @@ define <4 x i32> @test6(<4 x i32> %x) {
define <4 x i32> @test7(<4 x i32> %x) {
; X86-LABEL: test7:
; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [3264175145,3264175145,1749801491,1749801491]
; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-NEXT: pmuludq %xmm1, %xmm0
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X86-NEXT: pmuludq %xmm2, %xmm1
; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-NEXT: retl
@@ -158,14 +152,12 @@ define <4 x i32> @test8(<4 x i32> %x) {
; X86-NEXT: movdqa %xmm0, %xmm1
; X86-NEXT: psrad $3, %xmm1
; X86-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; X86-NEXT: movdqa {{.*#+}} xmm2 = [1,1,2863311531,2863311531]
; X86-NEXT: movapd %xmm1, %xmm0
; X86-NEXT: pmuludq %xmm2, %xmm0
; X86-NEXT: movdqa {{.*#+}} xmm0 = [1,1,2863311531,2863311531]
; X86-NEXT: pmuludq %xmm1, %xmm0
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; X86-NEXT: pmuludq %xmm1, %xmm2
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; X86-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-NEXT: retl
;
File diff suppressed because it is too large
@@ -138,25 +138,26 @@ define <3 x i1> @test_urem_vec(<3 x i11> %X) nounwind {
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: psubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = <683,1463,819,u>
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2047,2047,2047,2047]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: movss {{.*#+}} xmm3 = xmm2[0],xmm3[1,2,3]
; SSE2-NEXT: pslld $10, %xmm1
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT: orps %xmm3, %xmm2
; SSE2-NEXT: andps %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: movl $1463, %eax # imm = 0x5B7
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: pmuludq %xmm1, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2047,2047,2047,2047]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: psrld $1, %xmm3
; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3]
; SSE2-NEXT: pslld $10, %xmm0
; SSE2-NEXT: xorps %xmm3, %xmm3
; SSE2-NEXT: movss {{.*#+}} xmm3 = xmm0[0],xmm3[1,2,3]
; SSE2-NEXT: orps %xmm2, %xmm3
; SSE2-NEXT: andps %xmm1, %xmm3
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %dl
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
File diff suppressed because it is too large
@@ -138,12 +138,10 @@ define <4 x i32> @test_urem_even_100(<4 x i32> %X) nounwind {
define <4 x i32> @test_urem_odd_neg25(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_urem_odd_neg25:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [3264175145,1030792151,1030792151,3264175145]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; CHECK-SSE2-NEXT: pmuludq %xmm2, %xmm1
; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -118,7 +118,7 @@ define <8 x i32> @test6(<8 x i32> %a) {
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2,2,8,8]
; SSE2-NEXT: pmuludq %xmm4, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
@@ -191,7 +191,7 @@ define <16 x i32> @test8(<16 x i32> %a) {
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2,2,8,8]
; SSE2-NEXT: pmuludq %xmm6, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
@@ -2188,12 +2188,10 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; SSE2-NEXT: psrld $28, %xmm1
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [16,32,64,128]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: por %xmm1, %xmm0
@@ -2304,12 +2302,10 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-NEXT: psrld $28, %xmm1
; X86-SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; X86-SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [16,32,64,128]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm2, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm3, %xmm2
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X86-SSE2-NEXT: por %xmm1, %xmm0
@@ -1478,46 +1478,40 @@ define <2 x i64> @constant_funnnel_v2i64(<2 x i64> %x) nounwind {
define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind {
; SSE2-LABEL: constant_funnnel_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_funnnel_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm2, %xmm3
; SSE41-NEXT: pmuludq %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE41-NEXT: por %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_funnnel_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [16,32,64,128]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_funnnel_v4i32:
@@ -1576,18 +1570,16 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind {
;
; X86-SSE2-LABEL: constant_funnnel_v4i32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm2, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT: por %xmm3, %xmm0
; X86-SSE2-NEXT: por %xmm2, %xmm0
; X86-SSE2-NEXT: retl
%res = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 4, i32 5, i32 6, i32 7>)
ret <4 x i32> %res
@@ -1160,27 +1160,23 @@ define <4 x i64> @constant_funnnel_v4i64(<4 x i64> %x) nounwind {
define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x) nounwind {
; AVX1-LABEL: constant_funnnel_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [256,512,1024,2048]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [16,32,64,128]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -317,46 +317,40 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind {
; SSE2-LABEL: constant_funnnel_v2i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [16,32,1,1]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_funnnel_v2i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [16,32,1,1]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm2, %xmm3
; SSE41-NEXT: pmuludq %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE41-NEXT: por %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_funnnel_v2i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [16,32,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_funnnel_v2i32:
@@ -415,18 +409,16 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind {
;
; X86-SSE2-LABEL: constant_funnnel_v2i32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [16,32,1,1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm2, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT: por %xmm3, %xmm0
; X86-SSE2-NEXT: por %xmm2, %xmm0
; X86-SSE2-NEXT: retl
%res = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %x, <2 x i32> %x, <2 x i32> <i32 4, i32 5>)
ret <2 x i32> %res
@@ -1894,12 +1894,10 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; SSE2-NEXT: psrld $4, %xmm1
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [268435456,134217728,67108864,33554432]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: por %xmm1, %xmm0
@@ -2011,12 +2009,10 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-NEXT: psrld $4, %xmm1
; X86-SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; X86-SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [268435456,134217728,67108864,33554432]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm2, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm3, %xmm2
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X86-SSE2-NEXT: por %xmm1, %xmm0
@@ -1566,46 +1566,40 @@ define <2 x i64> @constant_funnnel_v2i64(<2 x i64> %x) nounwind {
define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind {
; SSE2-LABEL: constant_funnnel_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [268435456,134217728,67108864,33554432]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_funnnel_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [268435456,134217728,67108864,33554432]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm2, %xmm3
; SSE41-NEXT: pmuludq %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE41-NEXT: por %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_funnnel_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [268435456,134217728,67108864,33554432]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_funnnel_v4i32:
@@ -1664,18 +1658,16 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x) nounwind {
;
; X86-SSE2-LABEL: constant_funnnel_v4i32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [268435456,134217728,67108864,33554432]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm2, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT: por %xmm3, %xmm0
; X86-SSE2-NEXT: por %xmm2, %xmm0
; X86-SSE2-NEXT: retl
%res = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 4, i32 5, i32 6, i32 7>)
ret <4 x i32> %res
@@ -1257,27 +1257,23 @@ define <4 x i64> @constant_funnnel_v4i64(<4 x i64> %x) nounwind {
define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x) nounwind {
; AVX1-LABEL: constant_funnnel_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [16777216,8388608,4194304,2097152]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [268435456,134217728,67108864,33554432]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -344,46 +344,40 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind {
; SSE2-LABEL: constant_funnnel_v2i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [268435456,134217728,1,1]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_funnnel_v2i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [268435456,134217728,1,1]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm2, %xmm3
; SSE41-NEXT: pmuludq %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE41-NEXT: por %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_funnnel_v2i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [268435456,134217728,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_funnnel_v2i32:
@@ -442,18 +436,16 @@ define <2 x i32> @constant_funnnel_v2i32(<2 x i32> %x) nounwind {
;
; X86-SSE2-LABEL: constant_funnnel_v2i32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [268435456,134217728,1,1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm2, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT: por %xmm3, %xmm0
; X86-SSE2-NEXT: por %xmm2, %xmm0
; X86-SSE2-NEXT: retl
%res = call <2 x i32> @llvm.fshr.v2i32(<2 x i32> %x, <2 x i32> %x, <2 x i32> <i32 4, i32 5>)
ret <2 x i32> %res
@@ -127,23 +127,31 @@ define <2 x i64> @mul_v2i64_32_8(<2 x i64> %a0) nounwind {
}

define <4 x i32> @mul_v4i32_1_2_4_8(<4 x i32> %a0) nounwind {
; SSE2-LABEL: mul_v4i32_1_2_4_8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: ret{{[l|q]}}
; X86-SSE2-LABEL: mul_v4i32_1_2_4_8:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT: retl
;
; X86-SSE4-LABEL: mul_v4i32_1_2_4_8:
; X86-SSE4: # %bb.0:
; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE4-NEXT: retl
;
; X64-SSE2-LABEL: mul_v4i32_1_2_4_8:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-SSE2-NEXT: retq
;
; X64-SSE4-LABEL: mul_v4i32_1_2_4_8:
; X64-SSE4: # %bb.0:
; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -167,6 +175,47 @@ define <4 x i32> @mul_v4i32_1_2_4_8(<4 x i32> %a0) nounwind {
ret <4 x i32> %1
}

define <4 x i32> @mul_v4i32_1_2_4_8_optsize(<4 x i32> %a0) nounwind optsize {
; SSE2-LABEL: mul_v4i32_1_2_4_8_optsize:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: ret{{[l|q]}}
;
; X86-SSE4-LABEL: mul_v4i32_1_2_4_8_optsize:
; X86-SSE4: # %bb.0:
; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE4-NEXT: retl
;
; X64-SSE4-LABEL: mul_v4i32_1_2_4_8_optsize:
; X64-SSE4: # %bb.0:
; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE4-NEXT: retq
;
; X64-XOP-LABEL: mul_v4i32_1_2_4_8_optsize:
; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpshld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v4i32_1_2_4_8_optsize:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX2-NEXT: retq
;
; X64-AVX512DQ-LABEL: mul_v4i32_1_2_4_8_optsize:
; X64-AVX512DQ: # %bb.0:
; X64-AVX512DQ-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX512DQ-NEXT: retq
%1 = mul <4 x i32> %a0, <i32 1, i32 2, i32 4, i32 8>
ret <4 x i32> %1
}

define <8 x i16> @mul_v8i16_1_2_4_8_16_32_64_128(<8 x i16> %a0) nounwind {
; X86-SSE-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
; X86-SSE: # %bb.0:
@@ -949,23 +998,31 @@ define <2 x i64> @mul_v2i64_17_65(<2 x i64> %a0) nounwind {
}

define <4 x i32> @mul_v4i32_5_17_33_65(<4 x i32> %a0) nounwind {
; SSE2-LABEL: mul_v4i32_5_17_33_65:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [5,17,33,65]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: ret{{[l|q]}}
; X86-SSE2-LABEL: mul_v4i32_5_17_33_65:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT: retl
;
; X86-SSE4-LABEL: mul_v4i32_5_17_33_65:
; X86-SSE4: # %bb.0:
; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE4-NEXT: retl
;
; X64-SSE2-LABEL: mul_v4i32_5_17_33_65:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-SSE2-NEXT: retq
;
; X64-SSE4-LABEL: mul_v4i32_5_17_33_65:
; X64-SSE4: # %bb.0:
; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -1544,17 +1601,16 @@ define <2 x i64> @mul_v2i64_0_1(<2 x i64> %a0) nounwind {
define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind {
; X86-SSE-LABEL: mul_v2i64_neg_0_1:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movdqa %xmm0, %xmm1
; X86-SSE-NEXT: psrlq $32, %xmm1
; X86-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,0,4294967295,4294967295]
; X86-SSE-NEXT: pmuludq %xmm2, %xmm1
; X86-SSE-NEXT: movdqa %xmm2, %xmm3
; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,0,4294967295,4294967295]
; X86-SSE-NEXT: movdqa %xmm0, %xmm2
; X86-SSE-NEXT: pmuludq %xmm1, %xmm2
; X86-SSE-NEXT: movdqa %xmm0, %xmm3
; X86-SSE-NEXT: psrlq $32, %xmm3
; X86-SSE-NEXT: pmuludq %xmm0, %xmm3
; X86-SSE-NEXT: paddq %xmm1, %xmm3
; X86-SSE-NEXT: psllq $32, %xmm3
; X86-SSE-NEXT: pmuludq %xmm2, %xmm0
; X86-SSE-NEXT: pmuludq %xmm1, %xmm3
; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: paddq %xmm3, %xmm0
; X86-SSE-NEXT: psllq $32, %xmm0
; X86-SSE-NEXT: paddq %xmm2, %xmm0
; X86-SSE-NEXT: retl
;
; X64-SSE-LABEL: mul_v2i64_neg_0_1:
@@ -1606,17 +1662,16 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind {
define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
; X86-SSE-LABEL: mul_v2i64_15_neg_63:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movdqa %xmm0, %xmm1
; X86-SSE-NEXT: psrlq $32, %xmm1
; X86-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,0,4294967233,4294967295]
; X86-SSE-NEXT: pmuludq %xmm2, %xmm1
; X86-SSE-NEXT: movdqa %xmm2, %xmm3
; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [15,0,4294967233,4294967295]
; X86-SSE-NEXT: movdqa %xmm0, %xmm2
; X86-SSE-NEXT: pmuludq %xmm1, %xmm2
; X86-SSE-NEXT: movdqa %xmm0, %xmm3
; X86-SSE-NEXT: psrlq $32, %xmm3
; X86-SSE-NEXT: pmuludq %xmm0, %xmm3
; X86-SSE-NEXT: paddq %xmm1, %xmm3
; X86-SSE-NEXT: psllq $32, %xmm3
; X86-SSE-NEXT: pmuludq %xmm2, %xmm0
; X86-SSE-NEXT: pmuludq %xmm1, %xmm3
; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: paddq %xmm3, %xmm0
; X86-SSE-NEXT: psllq $32, %xmm0
; X86-SSE-NEXT: paddq %xmm2, %xmm0
; X86-SSE-NEXT: retl
;
; X64-SSE-LABEL: mul_v2i64_15_neg_63:
@@ -1666,23 +1721,31 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
}

define <4 x i32> @mul_v4i32_0_15_31_7(<4 x i32> %a0) nounwind {
; SSE2-LABEL: mul_v4i32_0_15_31_7:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,15,31,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: ret{{[l|q]}}
; X86-SSE2-LABEL: mul_v4i32_0_15_31_7:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT: retl
;
; X86-SSE4-LABEL: mul_v4i32_0_15_31_7:
; X86-SSE4: # %bb.0:
; X86-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE4-NEXT: retl
;
; X64-SSE2-LABEL: mul_v4i32_0_15_31_7:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-SSE2-NEXT: retq
;
; X64-SSE4-LABEL: mul_v4i32_0_15_31_7:
; X64-SSE4: # %bb.0:
; X64-SSE4-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -1435,46 +1435,40 @@ define <2 x i64> @constant_rotate_v2i64(<2 x i64> %a) nounwind {
define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: constant_rotate_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_rotate_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm2, %xmm3
; SSE41-NEXT: pmuludq %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE41-NEXT: por %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_rotate_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [16,32,64,128]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_rotate_v4i32:
@@ -1533,18 +1527,16 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
;
; X86-SSE2-LABEL: constant_rotate_v4i32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm2, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT: por %xmm3, %xmm0
; X86-SSE2-NEXT: por %xmm2, %xmm0
; X86-SSE2-NEXT: retl
%shl = shl <4 x i32> %a, <i32 4, i32 5, i32 6, i32 7>
%lshr = lshr <4 x i32> %a, <i32 28, i32 27, i32 26, i32 25>
@@ -1137,27 +1137,23 @@ define <4 x i64> @constant_rotate_v4i64(<4 x i64> %a) nounwind {
define <8 x i32> @constant_rotate_v8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: constant_rotate_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [256,512,1024,2048]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [16,32,64,128]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -742,12 +742,10 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: constant_shift_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
@@ -789,12 +787,10 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
;
; X86-SSE-LABEL: constant_shift_v4i32:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE-NEXT: pmuludq %xmm1, %xmm0
; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X86-SSE-NEXT: pmuludq %xmm2, %xmm1
; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE-NEXT: retl
@@ -3103,16 +3103,38 @@ entry:
declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32)

define void @PR43024() {
; SSE-LABEL: PR43024:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm0 = [NaN,NaN,0.0E+0,0.0E+0]
; SSE-NEXT: movaps %xmm0, (%rax)
; SSE-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: addss %xmm1, %xmm0
; SSE-NEXT: movss %xmm0, (%rax)
; SSE-NEXT: retq
; SSE2-LABEL: PR43024:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm0 = [NaN,NaN,0.0E+0,0.0E+0]
; SSE2-NEXT: movaps %xmm0, (%rax)
; SSE2-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movss %xmm0, (%rax)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: PR43024:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movaps {{.*#+}} xmm0 = [NaN,NaN,0.0E+0,0.0E+0]
; SSSE3-NEXT: movaps %xmm0, (%rax)
; SSSE3-NEXT: addss %xmm0, %xmm0
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: addss %xmm1, %xmm0
; SSSE3-NEXT: addss %xmm1, %xmm0
; SSSE3-NEXT: movss %xmm0, (%rax)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: PR43024:
; SSE41: # %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [NaN,NaN,0.0E+0,0.0E+0]
; SSE41-NEXT: movaps %xmm0, (%rax)
; SSE41-NEXT: addss %xmm0, %xmm0
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movss %xmm0, (%rax)
; SSE41-NEXT: retq
;
; AVX-LABEL: PR43024:
; AVX: # %bb.0:
@@ -3292,19 +3314,17 @@ define void @SpinningCube() {
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movl $1065353216, (%rax) # imm = 0x3F800000
; SSE2-NEXT: movaps {{.*#+}} xmm0 = <u,u,u,1.0E+0>
; SSE2-NEXT: movaps {{.*#+}} xmm1 = <0.0E+0,-2.0E+0,u,u>
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: movaps %xmm2, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[1,3]
; SSE2-NEXT: xorps %xmm4, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm0[2,3]
; SSE2-NEXT: addps %xmm4, %xmm2
; SSE2-NEXT: movaps %xmm2, (%rax)
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,1,3]
; SSE2-NEXT: mulps %xmm2, %xmm1
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movapd {{.*#+}} xmm2 = <u,u,-2.0E+0,u>
; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
; SSE2-NEXT: xorps %xmm3, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,3]
; SSE2-NEXT: addps %xmm3, %xmm1
; SSE2-NEXT: movaps %xmm1, (%rax)
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE2-NEXT: mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: addps %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, (%rax)
; SSE2-NEXT: retq
@@ -3313,29 +3333,26 @@ define void @SpinningCube() {
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movl $1065353216, (%rax) # imm = 0x3F800000
; SSSE3-NEXT: movaps {{.*#+}} xmm0 = <u,u,u,1.0E+0>
; SSSE3-NEXT: movaps {{.*#+}} xmm1 = <0.0E+0,-2.0E+0,u,u>
; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: movaps %xmm2, %xmm3
; SSSE3-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[1,3]
; SSSE3-NEXT: xorps %xmm4, %xmm4
; SSSE3-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm0[2,3]
; SSSE3-NEXT: addps %xmm4, %xmm2
; SSSE3-NEXT: movaps %xmm2, (%rax)
; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,1,3]
; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0,0,2]
; SSSE3-NEXT: mulps %xmm1, %xmm2
; SSSE3-NEXT: addps %xmm0, %xmm2
; SSSE3-NEXT: movaps %xmm2, (%rax)
; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: movapd {{.*#+}} xmm2 = <u,u,-2.0E+0,u>
; SSSE3-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
; SSSE3-NEXT: xorps %xmm3, %xmm3
; SSSE3-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,3]
; SSSE3-NEXT: addps %xmm3, %xmm1
; SSSE3-NEXT: movaps %xmm1, (%rax)
; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,2]
; SSSE3-NEXT: mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSSE3-NEXT: addps %xmm0, %xmm1
; SSSE3-NEXT: movaps %xmm1, (%rax)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: SpinningCube:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movl $1065353216, (%rax) # imm = 0x3F800000
; SSE41-NEXT: movaps {{.*#+}} xmm0 = <u,u,u,1.0E+0>
; SSE41-NEXT: movaps {{.*#+}} xmm1 = <0.0E+0,-2.0E+0,u,u>
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,1,3]
; SSE41-NEXT: movaps {{.*#+}} xmm1 = <0.0E+0,0.0E+0,-2.0E+0,u>
; SSE41-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE41-NEXT: movaps %xmm1, %xmm3
; SSE41-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm2[0]
@@ -3354,15 +3371,14 @@ define void @SpinningCube() {
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: movl $1065353216, (%rax) # imm = 0x3F800000
; AVX1-NEXT: vmovaps {{.*#+}} xmm0 = <u,u,u,1.0E+0>
; AVX1-NEXT: vmovaps {{.*#+}} xmm1 = <0.0E+0,-2.0E+0,u,u>
; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[0,0,1,3]
; AVX1-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm0[0],xmm3[0],xmm0[2,3]
; AVX1-NEXT: vaddps %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vmovaps %xmm2, (%rax)
; AVX1-NEXT: vbroadcastss (%rax), %xmm2
; AVX1-NEXT: vmulps %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vmovaps {{.*#+}} xmm1 = <0.0E+0,0.0E+0,-2.0E+0,u>
; AVX1-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm0[0],xmm2[0],xmm0[2,3]
; AVX1-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vmovaps %xmm1, (%rax)
; AVX1-NEXT: vbroadcastss (%rax), %xmm1
; AVX1-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,0,1,3]
; AVX1-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vmovaps %xmm0, (%rax)
@@ -3372,15 +3388,14 @@ define void @SpinningCube() {
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: movl $1065353216, (%rax) # imm = 0x3F800000
; AVX2-NEXT: vbroadcastss {{.*#+}} xmm0 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; AVX2-NEXT: vmovaps {{.*#+}} xmm1 = <0.0E+0,-2.0E+0,u,u>
; AVX2-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[0,0,1,3]
; AVX2-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
; AVX2-NEXT: vinsertps {{.*#+}} xmm3 = xmm0[0],xmm3[0],xmm0[2,3]
; AVX2-NEXT: vaddps %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vmovaps %xmm2, (%rax)
; AVX2-NEXT: vbroadcastss (%rax), %xmm2
; AVX2-NEXT: vmulps %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vmovaps {{.*#+}} xmm1 = <0.0E+0,0.0E+0,-2.0E+0,u>
; AVX2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm0[0],xmm2[0],xmm0[2,3]
; AVX2-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vmovaps %xmm1, (%rax)
; AVX2-NEXT: vbroadcastss (%rax), %xmm1
; AVX2-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,0,1,3]
; AVX2-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vmovaps %xmm0, (%rax)
@@ -2408,36 +2408,28 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
; SSE-LABEL: trunc_mul_const_v16i32_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [0,1,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq %xmm4, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE-NEXT: pmuludq %xmm5, %xmm4
; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
; SSE-NEXT: pmuludq %xmm4, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE-NEXT: pmuludq %xmm5, %xmm4
; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [8,9,10,11]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,1,3,3]
; SSE-NEXT: pmuludq %xmm4, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE-NEXT: pmuludq %xmm5, %xmm4
; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [12,13,14,15]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3]
; SSE-NEXT: pmuludq %xmm4, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE-NEXT: pmuludq %xmm5, %xmm4
; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]