; llvm-project/llvm/test/CodeGen/X86/vector-shuffle-variable-128.ll
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
;
; Unary shuffle indices from registers
;
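; Note: these tests build a result vector by extracting elements of %x at
; runtime-variable indices and reinserting them in order. Since the indices
; are not constants, no immediate shuffle can be selected; the lowering
; checked below spills the source vector to the stack (movaps/vmovaps to
; -N(%rsp)), sign-extends each index, reloads the selected elements from the
; spill slot, and recombines them with unpck*/insertps/pinsr* instructions.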
define <2 x double> @var_shuffle_v2f64_v2f64_xx_i64(<2 x double> %x, i64 %i0, i64 %i1) nounwind {
; SSE-LABEL: var_shuffle_v2f64_v2f64_xx_i64:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; SSE-NEXT: retq
;
; AVX-LABEL: var_shuffle_v2f64_v2f64_xx_i64:
; AVX: # BB#0:
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT: retq
%x0 = extractelement <2 x double> %x, i64 %i0
%x1 = extractelement <2 x double> %x, i64 %i1
%r0 = insertelement <2 x double> undef, double %x0, i32 0
%r1 = insertelement <2 x double> %r0, double %x1, i32 1
ret <2 x double> %r1
}
define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1) nounwind {
; SSE-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
; SSE: # BB#0:
; SSE-NEXT: movslq %edi, %rax
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movslq %esi, %rcx
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
; AVX: # BB#0:
; AVX-NEXT: movslq %edi, %rax
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movslq %esi, %rcx
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%x0 = extractelement <2 x i64> %x, i32 %i0
%x1 = extractelement <2 x i64> %x, i32 %i1
%r0 = insertelement <2 x i64> undef, i64 %x0, i32 0
%r1 = insertelement <2 x i64> %r0, i64 %x1, i32 1
ret <2 x i64> %r1
}
define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE2-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSE2: # BB#0:
; SSE2-NEXT: movslq %edi, %rax
; SSE2-NEXT: movslq %esi, %rsi
; SSE2-NEXT: movslq %edx, %rdx
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movslq %ecx, %rcx
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSSE3: # BB#0:
; SSSE3-NEXT: movslq %edi, %rax
; SSSE3-NEXT: movslq %esi, %rsi
; SSSE3-NEXT: movslq %edx, %rdx
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movslq %ecx, %rcx
; SSSE3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSE41: # BB#0:
; SSE41-NEXT: movslq %edi, %rax
; SSE41-NEXT: movslq %esi, %rsi
; SSE41-NEXT: movslq %edx, %rdx
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movslq %ecx, %rcx
; SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; AVX: # BB#0:
; AVX-NEXT: movslq %edi, %rax
; AVX-NEXT: movslq %esi, %rsi
; AVX-NEXT: movslq %edx, %rdx
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movslq %ecx, %rcx
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; AVX-NEXT: retq
%x0 = extractelement <4 x float> %x, i32 %i0
%x1 = extractelement <4 x float> %x, i32 %i1
%x2 = extractelement <4 x float> %x, i32 %i2
%x3 = extractelement <4 x float> %x, i32 %i3
%r0 = insertelement <4 x float> undef, float %x0, i32 0
%r1 = insertelement <4 x float> %r0, float %x1, i32 1
%r2 = insertelement <4 x float> %r1, float %x2, i32 2
%r3 = insertelement <4 x float> %r2, float %x3, i32 3
ret <4 x float> %r3
}
define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE2-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSE2: # BB#0:
; SSE2-NEXT: movslq %edi, %rax
; SSE2-NEXT: movslq %esi, %rsi
; SSE2-NEXT: movslq %edx, %rdx
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movslq %ecx, %rcx
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSSE3: # BB#0:
; SSSE3-NEXT: movslq %edi, %rax
; SSSE3-NEXT: movslq %esi, %rsi
; SSSE3-NEXT: movslq %edx, %rdx
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movslq %ecx, %rcx
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSE41: # BB#0:
; SSE41-NEXT: movslq %edi, %rax
; SSE41-NEXT: movslq %esi, %rsi
; SSE41-NEXT: movslq %edx, %rdx
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movslq %ecx, %rcx
; SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: pinsrd $1, -24(%rsp,%rsi,4), %xmm0
; SSE41-NEXT: pinsrd $2, -24(%rsp,%rdx,4), %xmm0
; SSE41-NEXT: pinsrd $3, -24(%rsp,%rcx,4), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; AVX: # BB#0:
; AVX-NEXT: movslq %edi, %rax
; AVX-NEXT: movslq %esi, %rsi
; AVX-NEXT: movslq %edx, %rdx
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movslq %ecx, %rcx
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vpinsrd $1, -24(%rsp,%rsi,4), %xmm0, %xmm0
; AVX-NEXT: vpinsrd $2, -24(%rsp,%rdx,4), %xmm0, %xmm0
; AVX-NEXT: vpinsrd $3, -24(%rsp,%rcx,4), %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <4 x i32> %x, i32 %i0
%x1 = extractelement <4 x i32> %x, i32 %i1
%x2 = extractelement <4 x i32> %x, i32 %i2
%x3 = extractelement <4 x i32> %x, i32 %i3
%r0 = insertelement <4 x i32> undef, i32 %x0, i32 0
%r1 = insertelement <4 x i32> %r0, i32 %x1, i32 1
%r2 = insertelement <4 x i32> %r1, i32 %x2, i32 2
%r3 = insertelement <4 x i32> %r2, i32 %x3, i32 3
ret <4 x i32> %r3
}
define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
; SSE2-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSE2: # BB#0:
; SSE2-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
; SSE2-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
; SSE2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; SSE2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
; SSE2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; SSE2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; SSE2-NEXT: movswq %di, %rax
; SSE2-NEXT: movswq %si, %rsi
; SSE2-NEXT: movswq %dx, %rdx
; SSE2-NEXT: movswq %cx, %r10
; SSE2-NEXT: movswq %r8w, %r11
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movswq %r9w, %r8
; SSE2-NEXT: movswq {{[0-9]+}}(%rsp), %rcx
; SSE2-NEXT: movswq {{[0-9]+}}(%rsp), %rdi
; SSE2-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
; SSE2-NEXT: movzwl -24(%rsp,%rdi,2), %edi
; SSE2-NEXT: movzwl -24(%rsp,%rax,2), %eax
; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %esi
; SSE2-NEXT: movd %ecx, %xmm0
; SSE2-NEXT: movzwl -24(%rsp,%rdx,2), %ecx
; SSE2-NEXT: movd %ecx, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: movzwl -24(%rsp,%r10,2), %ecx
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: movzwl -24(%rsp,%r11,2), %eax
; SSE2-NEXT: movd %eax, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: movd %edi, %xmm1
; SSE2-NEXT: movd %ecx, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE2-NEXT: movd %esi, %xmm1
; SSE2-NEXT: movzwl -24(%rsp,%r8,2), %eax
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSSE3: # BB#0:
; SSSE3-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
; SSSE3-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
; SSSE3-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; SSSE3-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
; SSSE3-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; SSSE3-NEXT: movswq %di, %rax
; SSSE3-NEXT: movswq %si, %rsi
; SSSE3-NEXT: movswq %dx, %rdx
; SSSE3-NEXT: movswq %cx, %r10
; SSSE3-NEXT: movswq %r8w, %r11
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movswq %r9w, %r8
; SSSE3-NEXT: movswq {{[0-9]+}}(%rsp), %rcx
; SSSE3-NEXT: movswq {{[0-9]+}}(%rsp), %rdi
; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
; SSSE3-NEXT: movzwl -24(%rsp,%rdi,2), %edi
; SSSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax
; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %esi
; SSSE3-NEXT: movd %ecx, %xmm0
; SSSE3-NEXT: movzwl -24(%rsp,%rdx,2), %ecx
; SSSE3-NEXT: movd %ecx, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT: movzwl -24(%rsp,%r10,2), %ecx
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: movzwl -24(%rsp,%r11,2), %eax
; SSSE3-NEXT: movd %eax, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: movd %edi, %xmm1
; SSSE3-NEXT: movd %ecx, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSSE3-NEXT: movd %esi, %xmm1
; SSSE3-NEXT: movzwl -24(%rsp,%r8,2), %eax
; SSSE3-NEXT: movd %eax, %xmm3
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSE41: # BB#0:
; SSE41-NEXT: pushq %rbx
; SSE41-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
; SSE41-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
; SSE41-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; SSE41-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
; SSE41-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; SSE41-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; SSE41-NEXT: movswq %di, %rax
; SSE41-NEXT: movswq %si, %rbx
; SSE41-NEXT: movswq %dx, %r11
; SSE41-NEXT: movswq %cx, %r10
; SSE41-NEXT: movswq %r8w, %rdi
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movswq %r9w, %rcx
; SSE41-NEXT: movswq {{[0-9]+}}(%rsp), %rdx
; SSE41-NEXT: movswq {{[0-9]+}}(%rsp), %rsi
; SSE41-NEXT: movzwl -16(%rsp,%rdx,2), %edx
; SSE41-NEXT: movzwl -16(%rsp,%rsi,2), %esi
; SSE41-NEXT: movzwl -16(%rsp,%rax,2), %eax
; SSE41-NEXT: movd %eax, %xmm0
; SSE41-NEXT: pinsrw $1, -16(%rsp,%rbx,2), %xmm0
; SSE41-NEXT: pinsrw $2, -16(%rsp,%r11,2), %xmm0
; SSE41-NEXT: pinsrw $3, -16(%rsp,%r10,2), %xmm0
; SSE41-NEXT: pinsrw $4, -16(%rsp,%rdi,2), %xmm0
; SSE41-NEXT: pinsrw $5, -16(%rsp,%rcx,2), %xmm0
; SSE41-NEXT: pinsrw $6, %edx, %xmm0
; SSE41-NEXT: pinsrw $7, %esi, %xmm0
; SSE41-NEXT: popq %rbx
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; AVX: # BB#0:
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %rbx
; AVX-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
; AVX-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
; AVX-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; AVX-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
; AVX-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; AVX-NEXT: movswq %di, %r10
; AVX-NEXT: movswq %si, %r11
; AVX-NEXT: movswq %dx, %r14
; AVX-NEXT: movswq %cx, %rcx
; AVX-NEXT: movswq %r8w, %rdi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movswq %r9w, %rax
; AVX-NEXT: movswq {{[0-9]+}}(%rsp), %rsi
; AVX-NEXT: movswq {{[0-9]+}}(%rsp), %rdx
; AVX-NEXT: movzwl -24(%rsp,%rsi,2), %esi
; AVX-NEXT: movzwl -24(%rsp,%rdx,2), %edx
; AVX-NEXT: movzwl -24(%rsp,%r10,2), %ebx
; AVX-NEXT: vmovd %ebx, %xmm0
; AVX-NEXT: vpinsrw $1, -24(%rsp,%r11,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $2, -24(%rsp,%r14,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $4, -24(%rsp,%rdi,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $6, %esi, %xmm0, %xmm0
; AVX-NEXT: vpinsrw $7, %edx, %xmm0, %xmm0
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r14
; AVX-NEXT: retq
%x0 = extractelement <8 x i16> %x, i16 %i0
%x1 = extractelement <8 x i16> %x, i16 %i1
%x2 = extractelement <8 x i16> %x, i16 %i2
%x3 = extractelement <8 x i16> %x, i16 %i3
%x4 = extractelement <8 x i16> %x, i16 %i4
%x5 = extractelement <8 x i16> %x, i16 %i5
%x6 = extractelement <8 x i16> %x, i16 %i6
%x7 = extractelement <8 x i16> %x, i16 %i7
%r0 = insertelement <8 x i16> undef, i16 %x0, i32 0
%r1 = insertelement <8 x i16> %r0, i16 %x1, i32 1
%r2 = insertelement <8 x i16> %r1, i16 %x2, i32 2
%r3 = insertelement <8 x i16> %r2, i16 %x3, i32 3
%r4 = insertelement <8 x i16> %r3, i16 %x4, i32 4
%r5 = insertelement <8 x i16> %r4, i16 %x5, i32 5
%r6 = insertelement <8 x i16> %r5, i16 %x6, i32 6
%r7 = insertelement <8 x i16> %r6, i16 %x7, i32 7
ret <8 x i16> %r7
}
define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %i0, i8 %i1, i8 %i2, i8 %i3, i8 %i4, i8 %i5, i8 %i6, i8 %i7, i8 %i8, i8 %i9, i8 %i10, i8 %i11, i8 %i12, i8 %i13, i8 %i14, i8 %i15) nounwind {
; SSE2-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE2: # BB#0:
; SSE2-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
; SSE2-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
; SSE2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; SSE2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
; SSE2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; SSE2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %r10
; SSE2-NEXT: leaq -{{[0-9]+}}(%rsp), %r11
; SSE2-NEXT: movzbl (%r10,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm15
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm8
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm9
; SSE2-NEXT: movsbq %dl, %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm10
; SSE2-NEXT: movsbq %dil, %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm11
; SSE2-NEXT: movsbq %r8b, %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm7
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm2
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm12
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm13
; SSE2-NEXT: movsbq %cl, %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm6
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm14
; SSE2-NEXT: movsbq %sil, %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm5
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm4
; SSE2-NEXT: movsbq %r9b, %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3],xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSSE3: # BB#0:
; SSSE3-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
; SSSE3-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
; SSSE3-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; SSSE3-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
; SSSE3-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %r10
; SSSE3-NEXT: leaq -{{[0-9]+}}(%rsp), %r11
; SSSE3-NEXT: movzbl (%r10,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm15
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm8
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm9
; SSSE3-NEXT: movsbq %dl, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm3
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm10
; SSSE3-NEXT: movsbq %dil, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm11
; SSSE3-NEXT: movsbq %r8b, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm7
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm2
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm12
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm13
; SSSE3-NEXT: movsbq %cl, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm6
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm14
; SSSE3-NEXT: movsbq %sil, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm5
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm4
; SSSE3-NEXT: movsbq %r9b, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3],xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE41: # BB#0:
; SSE41-NEXT: pushq %rbp
; SSE41-NEXT: pushq %r15
; SSE41-NEXT: pushq %r14
; SSE41-NEXT: pushq %r13
; SSE41-NEXT: pushq %r12
; SSE41-NEXT: pushq %rbx
; SSE41-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
; SSE41-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
; SSE41-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; SSE41-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
; SSE41-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; SSE41-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; SSE41-NEXT: movsbq %dil, %r15
; SSE41-NEXT: movsbq %sil, %r14
; SSE41-NEXT: movsbq %dl, %r11
; SSE41-NEXT: movsbq %cl, %r10
; SSE41-NEXT: movsbq %r8b, %r8
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movsbq %r9b, %r9
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r12
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r13
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rbp
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rbx
; SSE41-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; SSE41-NEXT: movzbl (%r15,%rax), %ecx
; SSE41-NEXT: movd %ecx, %xmm0
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r15
; SSE41-NEXT: pinsrb $1, (%r14,%rax), %xmm0
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r14
; SSE41-NEXT: pinsrb $2, (%r11,%rax), %xmm0
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r11
; SSE41-NEXT: pinsrb $3, (%r10,%rax), %xmm0
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r10
; SSE41-NEXT: pinsrb $4, (%r8,%rax), %xmm0
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx
; SSE41-NEXT: pinsrb $5, (%r9,%rax), %xmm0
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rdx
; SSE41-NEXT: movzbl (%r12,%rax), %esi
; SSE41-NEXT: movzbl (%r13,%rax), %edi
; SSE41-NEXT: movzbl (%rbp,%rax), %ebp
; SSE41-NEXT: movzbl (%rbx,%rax), %ebx
; SSE41-NEXT: movzbl (%r15,%rax), %r8d
; SSE41-NEXT: movzbl (%r14,%rax), %r9d
; SSE41-NEXT: movzbl (%r11,%rax), %r11d
; SSE41-NEXT: movzbl (%r10,%rax), %r10d
; SSE41-NEXT: movzbl (%rcx,%rax), %ecx
; SSE41-NEXT: movzbl (%rdx,%rax), %eax
; SSE41-NEXT: pinsrb $6, %esi, %xmm0
; SSE41-NEXT: pinsrb $7, %edi, %xmm0
; SSE41-NEXT: pinsrb $8, %ebp, %xmm0
; SSE41-NEXT: pinsrb $9, %ebx, %xmm0
; SSE41-NEXT: pinsrb $10, %r8d, %xmm0
; SSE41-NEXT: pinsrb $11, %r9d, %xmm0
; SSE41-NEXT: pinsrb $12, %r11d, %xmm0
; SSE41-NEXT: pinsrb $13, %r10d, %xmm0
; SSE41-NEXT: pinsrb $14, %ecx, %xmm0
; SSE41-NEXT: pinsrb $15, %eax, %xmm0
; SSE41-NEXT: popq %rbx
; SSE41-NEXT: popq %r12
; SSE41-NEXT: popq %r13
; SSE41-NEXT: popq %r14
; SSE41-NEXT: popq %r15
; SSE41-NEXT: popq %rbp
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; AVX: # BB#0:
; AVX-NEXT: pushq %rbp
; AVX-NEXT: pushq %r15
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %r13
; AVX-NEXT: pushq %r12
; AVX-NEXT: pushq %rbx
; AVX-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
; AVX-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
; AVX-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; AVX-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
; AVX-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; AVX-NEXT: movsbq %dil, %r10
; AVX-NEXT: movsbq %sil, %r11
; AVX-NEXT: movsbq %dl, %r14
; AVX-NEXT: movsbq %cl, %r15
; AVX-NEXT: movsbq %r8b, %r8
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movsbq %r9b, %r9
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r12
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r13
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rbp
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx
; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rdi
; AVX-NEXT: movzbl (%r10,%rdi), %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r10
; AVX-NEXT: vpinsrb $1, (%r11,%rdi), %xmm0, %xmm0
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r11
; AVX-NEXT: vpinsrb $2, (%r14,%rdi), %xmm0, %xmm0
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r14
; AVX-NEXT: vpinsrb $3, (%r15,%rdi), %xmm0, %xmm0
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r15
; AVX-NEXT: vpinsrb $4, (%r8,%rdi), %xmm0, %xmm0
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r8
; AVX-NEXT: vpinsrb $5, (%r9,%rdi), %xmm0, %xmm0
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rsi
; AVX-NEXT: movzbl (%r12,%rdi), %edx
; AVX-NEXT: movzbl (%r13,%rdi), %ebx
; AVX-NEXT: movzbl (%rbp,%rdi), %ebp
; AVX-NEXT: movzbl (%rcx,%rdi), %ecx
; AVX-NEXT: movzbl (%r10,%rdi), %eax
; AVX-NEXT: movzbl (%r11,%rdi), %r9d
; AVX-NEXT: movzbl (%r14,%rdi), %r10d
; AVX-NEXT: movzbl (%r15,%rdi), %r11d
; AVX-NEXT: movzbl (%r8,%rdi), %r8d
; AVX-NEXT: movzbl (%rsi,%rdi), %esi
; AVX-NEXT: vpinsrb $6, %edx, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $7, %ebx, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $8, %ebp, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $11, %r9d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $12, %r10d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $13, %r11d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $14, %r8d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $15, %esi, %xmm0, %xmm0
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r12
; AVX-NEXT: popq %r13
; AVX-NEXT: popq %r14
; AVX-NEXT: popq %r15
; AVX-NEXT: popq %rbp
; AVX-NEXT: retq
%x0 = extractelement <16 x i8> %x, i8 %i0
%x1 = extractelement <16 x i8> %x, i8 %i1
%x2 = extractelement <16 x i8> %x, i8 %i2
%x3 = extractelement <16 x i8> %x, i8 %i3
%x4 = extractelement <16 x i8> %x, i8 %i4
%x5 = extractelement <16 x i8> %x, i8 %i5
%x6 = extractelement <16 x i8> %x, i8 %i6
%x7 = extractelement <16 x i8> %x, i8 %i7
%x8 = extractelement <16 x i8> %x, i8 %i8
%x9 = extractelement <16 x i8> %x, i8 %i9
%x10 = extractelement <16 x i8> %x, i8 %i10
%x11 = extractelement <16 x i8> %x, i8 %i11
%x12 = extractelement <16 x i8> %x, i8 %i12
%x13 = extractelement <16 x i8> %x, i8 %i13
%x14 = extractelement <16 x i8> %x, i8 %i14
%x15 = extractelement <16 x i8> %x, i8 %i15
%r0 = insertelement <16 x i8> undef, i8 %x0 , i32 0
%r1 = insertelement <16 x i8> %r0 , i8 %x1 , i32 1
%r2 = insertelement <16 x i8> %r1 , i8 %x2 , i32 2
%r3 = insertelement <16 x i8> %r2 , i8 %x3 , i32 3
%r4 = insertelement <16 x i8> %r3 , i8 %x4 , i32 4
%r5 = insertelement <16 x i8> %r4 , i8 %x5 , i32 5
%r6 = insertelement <16 x i8> %r5 , i8 %x6 , i32 6
%r7 = insertelement <16 x i8> %r6 , i8 %x7 , i32 7
%r8 = insertelement <16 x i8> %r7 , i8 %x8 , i32 8
%r9 = insertelement <16 x i8> %r8 , i8 %x9 , i32 9
%r10 = insertelement <16 x i8> %r9 , i8 %x10, i32 10
%r11 = insertelement <16 x i8> %r10, i8 %x11, i32 11
%r12 = insertelement <16 x i8> %r11, i8 %x12, i32 12
%r13 = insertelement <16 x i8> %r12, i8 %x13, i32 13
%r14 = insertelement <16 x i8> %r13, i8 %x14, i32 14
%r15 = insertelement <16 x i8> %r14, i8 %x15, i32 15
ret <16 x i8> %r15
}
;
; Unary shuffle indices from memory
;
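; Note: same extract/insert pattern as above, except the shuffle indices are
; loaded from a pointer argument (i32* or i8* %i) instead of being passed in
; registers, so the checked lowering first loads and sign-extends each index
; from (%rdi) before the stack-spill-and-reload sequence.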
define <4 x i32> @mem_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32* %i) nounwind {
; SSE2-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
; SSE2: # BB#0:
; SSE2-NEXT: movslq (%rdi), %rax
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movslq 4(%rdi), %rcx
; SSE2-NEXT: movslq 8(%rdi), %rdx
; SSE2-NEXT: movslq 12(%rdi), %rsi
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
; SSSE3: # BB#0:
; SSSE3-NEXT: movslq (%rdi), %rax
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movslq 4(%rdi), %rcx
; SSSE3-NEXT: movslq 8(%rdi), %rdx
; SSSE3-NEXT: movslq 12(%rdi), %rsi
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
; SSE41: # BB#0:
; SSE41-NEXT: movslq (%rdi), %rax
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movslq 4(%rdi), %rcx
; SSE41-NEXT: movslq 8(%rdi), %rdx
; SSE41-NEXT: movslq 12(%rdi), %rsi
; SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: pinsrd $1, -24(%rsp,%rcx,4), %xmm0
; SSE41-NEXT: pinsrd $2, -24(%rsp,%rdx,4), %xmm0
; SSE41-NEXT: pinsrd $3, -24(%rsp,%rsi,4), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
; AVX: # BB#0:
; AVX-NEXT: movslq (%rdi), %rax
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movslq 4(%rdi), %rcx
; AVX-NEXT: movslq 8(%rdi), %rdx
; AVX-NEXT: movslq 12(%rdi), %rsi
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vpinsrd $1, -24(%rsp,%rcx,4), %xmm0, %xmm0
; AVX-NEXT: vpinsrd $2, -24(%rsp,%rdx,4), %xmm0, %xmm0
; AVX-NEXT: vpinsrd $3, -24(%rsp,%rsi,4), %xmm0, %xmm0
; AVX-NEXT: retq
%p0 = getelementptr inbounds i32, i32* %i, i64 0
%p1 = getelementptr inbounds i32, i32* %i, i64 1
%p2 = getelementptr inbounds i32, i32* %i, i64 2
%p3 = getelementptr inbounds i32, i32* %i, i64 3
%i0 = load i32, i32* %p0, align 4
%i1 = load i32, i32* %p1, align 4
%i2 = load i32, i32* %p2, align 4
%i3 = load i32, i32* %p3, align 4
%x0 = extractelement <4 x i32> %x, i32 %i0
%x1 = extractelement <4 x i32> %x, i32 %i1
%x2 = extractelement <4 x i32> %x, i32 %i2
%x3 = extractelement <4 x i32> %x, i32 %i3
%r0 = insertelement <4 x i32> undef, i32 %x0, i32 0
%r1 = insertelement <4 x i32> %r0, i32 %x1, i32 1
%r2 = insertelement <4 x i32> %r1, i32 %x2, i32 2
%r3 = insertelement <4 x i32> %r2, i32 %x3, i32 3
ret <4 x i32> %r3
}
define <16 x i8> @mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8* %i) nounwind {
; SSE2-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE2: # BB#0:
; SSE2-NEXT: movsbq (%rdi), %rcx
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm0
; SSE2-NEXT: movsbq 8(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm8
; SSE2-NEXT: movsbq 12(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm9
; SSE2-NEXT: movsbq 4(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm3
; SSE2-NEXT: movsbq 14(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm10
; SSE2-NEXT: movsbq 6(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm5
; SSE2-NEXT: movsbq 10(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm11
; SSE2-NEXT: movsbq 2(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm7
; SSE2-NEXT: movsbq 15(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm12
; SSE2-NEXT: movsbq 7(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm2
; SSE2-NEXT: movsbq 11(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm13
; SSE2-NEXT: movsbq 3(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm6
; SSE2-NEXT: movsbq 13(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm14
; SSE2-NEXT: movsbq 5(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm4
; SSE2-NEXT: movsbq 9(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm15
; SSE2-NEXT: movsbq 1(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3],xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3],xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSSE3: # BB#0:
; SSSE3-NEXT: movsbq (%rdi), %rcx
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm0
; SSSE3-NEXT: movsbq 8(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm8
; SSSE3-NEXT: movsbq 12(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm9
; SSSE3-NEXT: movsbq 4(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm3
; SSSE3-NEXT: movsbq 14(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm10
; SSSE3-NEXT: movsbq 6(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm5
; SSSE3-NEXT: movsbq 10(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm11
; SSSE3-NEXT: movsbq 2(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm7
; SSSE3-NEXT: movsbq 15(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm12
; SSSE3-NEXT: movsbq 7(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm2
; SSSE3-NEXT: movsbq 11(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm13
; SSSE3-NEXT: movsbq 3(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm6
; SSSE3-NEXT: movsbq 13(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm14
; SSSE3-NEXT: movsbq 5(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm4
; SSSE3-NEXT: movsbq 9(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm15
; SSSE3-NEXT: movsbq 1(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %eax
; SSSE3-NEXT: movd %eax, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3],xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3],xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE41: # BB#0:
; SSE41-NEXT: pushq %rbp
; SSE41-NEXT: pushq %r15
; SSE41-NEXT: pushq %r14
; SSE41-NEXT: pushq %r13
; SSE41-NEXT: pushq %r12
; SSE41-NEXT: pushq %rbx
; SSE41-NEXT: movsbq (%rdi), %rax
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movsbq 1(%rdi), %r15
; SSE41-NEXT: movsbq 2(%rdi), %r8
; SSE41-NEXT: movsbq 3(%rdi), %r9
; SSE41-NEXT: movsbq 4(%rdi), %r10
; SSE41-NEXT: movsbq 5(%rdi), %r11
; SSE41-NEXT: movsbq 6(%rdi), %r14
; SSE41-NEXT: movsbq 7(%rdi), %r12
; SSE41-NEXT: movsbq 8(%rdi), %r13
; SSE41-NEXT: movsbq 9(%rdi), %rdx
; SSE41-NEXT: movsbq 10(%rdi), %rcx
; SSE41-NEXT: movsbq 11(%rdi), %rsi
; SSE41-NEXT: movsbq 12(%rdi), %rbx
; SSE41-NEXT: leaq -{{[0-9]+}}(%rsp), %rbp
; SSE41-NEXT: movzbl (%rax,%rbp), %eax
; SSE41-NEXT: movd %eax, %xmm0
; SSE41-NEXT: movsbq 13(%rdi), %rax
; SSE41-NEXT: pinsrb $1, (%r15,%rbp), %xmm0
; SSE41-NEXT: movsbq 14(%rdi), %r15
; SSE41-NEXT: movsbq 15(%rdi), %rdi
; SSE41-NEXT: movzbl (%rdi,%rbp), %edi
; SSE41-NEXT: movzbl (%r15,%rbp), %r15d
; SSE41-NEXT: movzbl (%rax,%rbp), %eax
; SSE41-NEXT: movzbl (%rbx,%rbp), %ebx
; SSE41-NEXT: movzbl (%rsi,%rbp), %esi
; SSE41-NEXT: movzbl (%rcx,%rbp), %ecx
; SSE41-NEXT: movzbl (%rdx,%rbp), %edx
; SSE41-NEXT: movzbl (%r13,%rbp), %r13d
; SSE41-NEXT: movzbl (%r12,%rbp), %r12d
; SSE41-NEXT: movzbl (%r14,%rbp), %r14d
; SSE41-NEXT: movzbl (%r11,%rbp), %r11d
; SSE41-NEXT: movzbl (%r10,%rbp), %r10d
; SSE41-NEXT: movzbl (%r9,%rbp), %r9d
; SSE41-NEXT: movzbl (%r8,%rbp), %ebp
; SSE41-NEXT: pinsrb $2, %ebp, %xmm0
; SSE41-NEXT: pinsrb $3, %r9d, %xmm0
; SSE41-NEXT: pinsrb $4, %r10d, %xmm0
; SSE41-NEXT: pinsrb $5, %r11d, %xmm0
; SSE41-NEXT: pinsrb $6, %r14d, %xmm0
; SSE41-NEXT: pinsrb $7, %r12d, %xmm0
; SSE41-NEXT: pinsrb $8, %r13d, %xmm0
; SSE41-NEXT: pinsrb $9, %edx, %xmm0
; SSE41-NEXT: pinsrb $10, %ecx, %xmm0
; SSE41-NEXT: pinsrb $11, %esi, %xmm0
; SSE41-NEXT: pinsrb $12, %ebx, %xmm0
; SSE41-NEXT: pinsrb $13, %eax, %xmm0
; SSE41-NEXT: pinsrb $14, %r15d, %xmm0
; SSE41-NEXT: pinsrb $15, %edi, %xmm0
; SSE41-NEXT: popq %rbx
; SSE41-NEXT: popq %r12
; SSE41-NEXT: popq %r13
; SSE41-NEXT: popq %r14
; SSE41-NEXT: popq %r15
; SSE41-NEXT: popq %rbp
; SSE41-NEXT: retq
;
; AVX-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; AVX: # BB#0:
; AVX-NEXT: pushq %rbp
; AVX-NEXT: pushq %r15
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %r13
; AVX-NEXT: pushq %r12
; AVX-NEXT: pushq %rbx
; AVX-NEXT: movsbq (%rdi), %rsi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movsbq 1(%rdi), %r15
; AVX-NEXT: movsbq 2(%rdi), %r8
; AVX-NEXT: movsbq 3(%rdi), %r9
; AVX-NEXT: movsbq 4(%rdi), %r10
; AVX-NEXT: movsbq 5(%rdi), %r11
; AVX-NEXT: movsbq 6(%rdi), %r14
; AVX-NEXT: movsbq 7(%rdi), %r12
; AVX-NEXT: movsbq 8(%rdi), %r13
; AVX-NEXT: movsbq 9(%rdi), %rdx
; AVX-NEXT: movsbq 10(%rdi), %rax
; AVX-NEXT: movsbq 11(%rdi), %rcx
; AVX-NEXT: movsbq 12(%rdi), %rbx
; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rbp
; AVX-NEXT: movzbl (%rsi,%rbp), %esi
; AVX-NEXT: vmovd %esi, %xmm0
; AVX-NEXT: movsbq 13(%rdi), %rsi
; AVX-NEXT: vpinsrb $1, (%r15,%rbp), %xmm0, %xmm0
; AVX-NEXT: movsbq 14(%rdi), %r15
; AVX-NEXT: movsbq 15(%rdi), %rdi
; AVX-NEXT: movzbl (%rdi,%rbp), %edi
; AVX-NEXT: movzbl (%r15,%rbp), %r15d
; AVX-NEXT: movzbl (%rsi,%rbp), %esi
; AVX-NEXT: movzbl (%rbx,%rbp), %ebx
; AVX-NEXT: movzbl (%rcx,%rbp), %ecx
; AVX-NEXT: movzbl (%rax,%rbp), %eax
; AVX-NEXT: movzbl (%rdx,%rbp), %edx
; AVX-NEXT: movzbl (%r13,%rbp), %r13d
; AVX-NEXT: movzbl (%r12,%rbp), %r12d
; AVX-NEXT: movzbl (%r14,%rbp), %r14d
; AVX-NEXT: movzbl (%r11,%rbp), %r11d
; AVX-NEXT: movzbl (%r10,%rbp), %r10d
; AVX-NEXT: movzbl (%r9,%rbp), %r9d
; AVX-NEXT: movzbl (%r8,%rbp), %ebp
; AVX-NEXT: vpinsrb $2, %ebp, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $3, %r9d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $4, %r10d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $5, %r11d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $6, %r14d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $7, %r12d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $8, %r13d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $9, %edx, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $12, %ebx, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $13, %esi, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $14, %r15d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $15, %edi, %xmm0, %xmm0
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r12
; AVX-NEXT: popq %r13
; AVX-NEXT: popq %r14
; AVX-NEXT: popq %r15
; AVX-NEXT: popq %rbp
; AVX-NEXT: retq
%p0 = getelementptr inbounds i8, i8* %i, i64 0
%p1 = getelementptr inbounds i8, i8* %i, i64 1
%p2 = getelementptr inbounds i8, i8* %i, i64 2
%p3 = getelementptr inbounds i8, i8* %i, i64 3
%p4 = getelementptr inbounds i8, i8* %i, i64 4
%p5 = getelementptr inbounds i8, i8* %i, i64 5
%p6 = getelementptr inbounds i8, i8* %i, i64 6
%p7 = getelementptr inbounds i8, i8* %i, i64 7
%p8 = getelementptr inbounds i8, i8* %i, i64 8
%p9 = getelementptr inbounds i8, i8* %i, i64 9
%p10 = getelementptr inbounds i8, i8* %i, i64 10
%p11 = getelementptr inbounds i8, i8* %i, i64 11
%p12 = getelementptr inbounds i8, i8* %i, i64 12
%p13 = getelementptr inbounds i8, i8* %i, i64 13
%p14 = getelementptr inbounds i8, i8* %i, i64 14
%p15 = getelementptr inbounds i8, i8* %i, i64 15
%i0 = load i8, i8* %p0 , align 4
%i1 = load i8, i8* %p1 , align 4
%i2 = load i8, i8* %p2 , align 4
%i3 = load i8, i8* %p3 , align 4
%i4 = load i8, i8* %p4 , align 4
%i5 = load i8, i8* %p5 , align 4
%i6 = load i8, i8* %p6 , align 4
%i7 = load i8, i8* %p7 , align 4
%i8 = load i8, i8* %p8 , align 4
%i9 = load i8, i8* %p9 , align 4
%i10 = load i8, i8* %p10, align 4
%i11 = load i8, i8* %p11, align 4
%i12 = load i8, i8* %p12, align 4
%i13 = load i8, i8* %p13, align 4
%i14 = load i8, i8* %p14, align 4
%i15 = load i8, i8* %p15, align 4
%x0 = extractelement <16 x i8> %x, i8 %i0
%x1 = extractelement <16 x i8> %x, i8 %i1
%x2 = extractelement <16 x i8> %x, i8 %i2
%x3 = extractelement <16 x i8> %x, i8 %i3
%x4 = extractelement <16 x i8> %x, i8 %i4
%x5 = extractelement <16 x i8> %x, i8 %i5
%x6 = extractelement <16 x i8> %x, i8 %i6
%x7 = extractelement <16 x i8> %x, i8 %i7
%x8 = extractelement <16 x i8> %x, i8 %i8
%x9 = extractelement <16 x i8> %x, i8 %i9
%x10 = extractelement <16 x i8> %x, i8 %i10
%x11 = extractelement <16 x i8> %x, i8 %i11
%x12 = extractelement <16 x i8> %x, i8 %i12
%x13 = extractelement <16 x i8> %x, i8 %i13
%x14 = extractelement <16 x i8> %x, i8 %i14
%x15 = extractelement <16 x i8> %x, i8 %i15
%r0 = insertelement <16 x i8> undef, i8 %x0 , i32 0
%r1 = insertelement <16 x i8> %r0 , i8 %x1 , i32 1
%r2 = insertelement <16 x i8> %r1 , i8 %x2 , i32 2
%r3 = insertelement <16 x i8> %r2 , i8 %x3 , i32 3
%r4 = insertelement <16 x i8> %r3 , i8 %x4 , i32 4
%r5 = insertelement <16 x i8> %r4 , i8 %x5 , i32 5
%r6 = insertelement <16 x i8> %r5 , i8 %x6 , i32 6
%r7 = insertelement <16 x i8> %r6 , i8 %x7 , i32 7
%r8 = insertelement <16 x i8> %r7 , i8 %x8 , i32 8
%r9 = insertelement <16 x i8> %r8 , i8 %x9 , i32 9
%r10 = insertelement <16 x i8> %r9 , i8 %x10, i32 10
%r11 = insertelement <16 x i8> %r10, i8 %x11, i32 11
%r12 = insertelement <16 x i8> %r11, i8 %x12, i32 12
%r13 = insertelement <16 x i8> %r12, i8 %x13, i32 13
%r14 = insertelement <16 x i8> %r13, i8 %x14, i32 14
%r15 = insertelement <16 x i8> %r14, i8 %x15, i32 15
ret <16 x i8> %r15
}
;
; Binary shuffle indices from registers
;
define <4 x float> @var_shuffle_v4f32_v4f32_x0yx_i32(<4 x float> %x, <4 x float> %y, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
; SSE: # BB#0:
; SSE-NEXT: movslq %edi, %rax
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movslq %edx, %rdx
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movslq %ecx, %rcx
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
; AVX: # BB#0:
; AVX-NEXT: movslq %edi, %rax
; AVX-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movslq %edx, %rdx
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movslq %ecx, %rcx
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%x0 = extractelement <4 x float> %x, i32 %i0
%x1 = extractelement <4 x float> %x, i32 %i1
%y2 = extractelement <4 x float> %y, i32 %i2
%x3 = extractelement <4 x float> %x, i32 %i3
%r0 = insertelement <4 x float> undef, float %x0, i32 0
%r1 = insertelement <4 x float> %r0, float 0.0, i32 1
%r2 = insertelement <4 x float> %r1, float %y2, i32 2
%r3 = insertelement <4 x float> %r2, float %x3, i32 3
ret <4 x float> %r3
}
define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %y, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
; SSE2-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSE2: # BB#0:
; SSE2-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
; SSE2-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
; SSE2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; SSE2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
; SSE2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; SSE2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; SSE2-NEXT: movswq %di, %r10
; SSE2-NEXT: movswq %si, %rsi
; SSE2-NEXT: movswq %dx, %r11
; SSE2-NEXT: movswq %cx, %rcx
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movswq %r8w, %rdi
; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movswq %r9w, %rax
; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %esi
; SSE2-NEXT: xorl %edx, %edx
; SSE2-NEXT: movd %edx, %xmm0
; SSE2-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
; SSE2-NEXT: movd %ecx, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: movd %esi, %xmm2
; SSE2-NEXT: movzwl -24(%rsp,%rax,2), %eax
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE2-NEXT: movzwl -40(%rsp,%r10,2), %eax
; SSE2-NEXT: movzwl -40(%rsp,%r11,2), %ecx
; SSE2-NEXT: movd %ecx, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: movzwl -40(%rsp,%rdi,2), %eax
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSSE3: # BB#0:
; SSSE3-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
; SSSE3-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
; SSSE3-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; SSSE3-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
; SSSE3-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; SSSE3-NEXT: movswq %di, %r10
; SSSE3-NEXT: movswq %si, %rsi
; SSSE3-NEXT: movswq %dx, %r11
; SSSE3-NEXT: movswq %cx, %rcx
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movswq %r8w, %rdi
; SSSE3-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movswq %r9w, %rax
; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %esi
; SSSE3-NEXT: xorl %edx, %edx
; SSSE3-NEXT: movd %edx, %xmm0
; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
; SSSE3-NEXT: movd %ecx, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT: movd %esi, %xmm2
; SSSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax
; SSSE3-NEXT: movd %eax, %xmm3
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSSE3-NEXT: movzwl -40(%rsp,%r10,2), %eax
; SSSE3-NEXT: movzwl -40(%rsp,%r11,2), %ecx
; SSSE3-NEXT: movd %ecx, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: movzwl -40(%rsp,%rdi,2), %eax
; SSSE3-NEXT: movd %eax, %xmm3
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSE41: # BB#0:
; SSE41-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
; SSE41-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
; SSE41-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; SSE41-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
; SSE41-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; SSE41-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; SSE41-NEXT: movswq %di, %rax
; SSE41-NEXT: movswq %si, %rsi
; SSE41-NEXT: movswq %dx, %rdx
; SSE41-NEXT: movswq %cx, %r10
; SSE41-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movswq %r8w, %rdi
; SSE41-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movswq %r9w, %rcx
; SSE41-NEXT: movzwl -40(%rsp,%rax,2), %eax
; SSE41-NEXT: movd %eax, %xmm1
; SSE41-NEXT: pinsrw $1, -24(%rsp,%rsi,2), %xmm1
; SSE41-NEXT: pinsrw $2, -40(%rsp,%rdx,2), %xmm1
; SSE41-NEXT: pinsrw $3, -24(%rsp,%r10,2), %xmm1
; SSE41-NEXT: pinsrw $4, -40(%rsp,%rdi,2), %xmm1
; SSE41-NEXT: pinsrw $5, -24(%rsp,%rcx,2), %xmm1
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; AVX1: # BB#0:
; AVX1-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
; AVX1-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
; AVX1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; AVX1-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
; AVX1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; AVX1-NEXT: movswq %di, %r10
; AVX1-NEXT: movswq %si, %r11
; AVX1-NEXT: movswq %dx, %rdx
; AVX1-NEXT: movswq %cx, %rcx
; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: movswq %r8w, %rdi
; AVX1-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: movswq %r9w, %rax
; AVX1-NEXT: movzwl -40(%rsp,%r10,2), %esi
; AVX1-NEXT: vmovd %esi, %xmm0
; AVX1-NEXT: vpinsrw $1, -24(%rsp,%r11,2), %xmm0, %xmm0
; AVX1-NEXT: vpinsrw $2, -40(%rsp,%rdx,2), %xmm0, %xmm0
; AVX1-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0
; AVX1-NEXT: vpinsrw $4, -40(%rsp,%rdi,2), %xmm0, %xmm0
; AVX1-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; AVX2: # BB#0:
; AVX2-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
; AVX2-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
; AVX2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; AVX2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
; AVX2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; AVX2-NEXT: movswq %di, %r10
; AVX2-NEXT: movswq %si, %r11
; AVX2-NEXT: movswq %dx, %rdx
; AVX2-NEXT: movswq %cx, %rcx
; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: movswq %r8w, %rdi
; AVX2-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: movswq %r9w, %rax
; AVX2-NEXT: movzwl -40(%rsp,%r10,2), %esi
; AVX2-NEXT: vmovd %esi, %xmm0
; AVX2-NEXT: vpinsrw $1, -24(%rsp,%r11,2), %xmm0, %xmm0
; AVX2-NEXT: vpinsrw $2, -40(%rsp,%rdx,2), %xmm0, %xmm0
; AVX2-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0
; AVX2-NEXT: vpinsrw $4, -40(%rsp,%rdi,2), %xmm0, %xmm0
; AVX2-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; AVX2-NEXT: retq
%x0 = extractelement <8 x i16> %x, i16 %i0
%y1 = extractelement <8 x i16> %y, i16 %i1
%x2 = extractelement <8 x i16> %x, i16 %i2
%y3 = extractelement <8 x i16> %y, i16 %i3
%x4 = extractelement <8 x i16> %x, i16 %i4
%y5 = extractelement <8 x i16> %y, i16 %i5
%x6 = extractelement <8 x i16> %x, i16 %i6
%x7 = extractelement <8 x i16> %x, i16 %i7
%r0 = insertelement <8 x i16> undef, i16 %x0, i32 0
%r1 = insertelement <8 x i16> %r0, i16 %y1, i32 1
%r2 = insertelement <8 x i16> %r1, i16 %x2, i32 2
%r3 = insertelement <8 x i16> %r2, i16 %y3, i32 3
%r4 = insertelement <8 x i16> %r3, i16 %x4, i32 4
%r5 = insertelement <8 x i16> %r4, i16 %y5, i32 5
%r6 = insertelement <8 x i16> %r5, i16 0, i32 6
%r7 = insertelement <8 x i16> %r6, i16 0, i32 7
ret <8 x i16> %r7
}