llvm-project/llvm/test/CodeGen/X86/vector-shuffle-variable-128.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
;
; Unary shuffle indices from registers
;
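; Each test extracts elements of a 128-bit vector at run-time indices and
; rebuilds the result with insertelement. Per the CHECK lines, the expected
; lowering masks every index to the element count, spills the source vector
; to the stack, and reassembles the result from scalar loads of the selected
; elements.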
define <2 x double> @var_shuffle_v2f64_v2f64_xx_i64(<2 x double> %x, i64 %i0, i64 %i1) nounwind {
; SSE-LABEL: var_shuffle_v2f64_v2f64_xx_i64:
; SSE: # %bb.0:
; SSE-NEXT: andl $1, %esi
; SSE-NEXT: andl $1, %edi
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; SSE-NEXT: retq
;
; AVX-LABEL: var_shuffle_v2f64_v2f64_xx_i64:
; AVX: # %bb.0:
; AVX-NEXT: andl $1, %esi
; AVX-NEXT: andl $1, %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT: retq
%x0 = extractelement <2 x double> %x, i64 %i0
%x1 = extractelement <2 x double> %x, i64 %i1
%r0 = insertelement <2 x double> undef, double %x0, i32 0
%r1 = insertelement <2 x double> %r0, double %x1, i32 1
ret <2 x double> %r1
}
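; As above, but for <2 x i64> with i32 indices; the checks expect the 32-bit
; index registers to be used as their 64-bit super-registers (the "kill"
; comments) when addressing the spill slot.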
define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1) nounwind {
; SSE-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: # kill: def $edi killed $edi def $rdi
; SSE-NEXT: andl $1, %edi
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $1, %esi
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
; AVX: # %bb.0:
; AVX-NEXT: # kill: def $esi killed $esi def $rsi
; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $1, %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: andl $1, %esi
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: retq
%x0 = extractelement <2 x i64> %x, i32 %i0
%x1 = extractelement <2 x i64> %x, i32 %i1
%r0 = insertelement <2 x i64> undef, i64 %x0, i32 0
%r1 = insertelement <2 x i64> %r0, i64 %x1, i32 1
ret <2 x i64> %r1
}
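; Four f32 elements: SSE2/SSSE3 gather the elements with movss and rebuild the
; vector with unpcklps/movlhps, while SSE4.1 and AVX insert directly from the
; spill slot with insertps.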
define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE2-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSE2: # %bb.0:
; SSE2-NEXT: # kill: def $ecx killed $ecx def $rcx
; SSE2-NEXT: # kill: def $edx killed $edx def $rdx
; SSE2-NEXT: # kill: def $esi killed $esi def $rsi
; SSE2-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-NEXT: andl $3, %edi
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: andl $3, %esi
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: andl $3, %edx
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: andl $3, %ecx
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: # kill: def $ecx killed $ecx def $rcx
; SSSE3-NEXT: # kill: def $edx killed $edx def $rdx
; SSSE3-NEXT: # kill: def $esi killed $esi def $rsi
; SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSSE3-NEXT: andl $3, %edi
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: andl $3, %esi
; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: andl $3, %edx
; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: andl $3, %ecx
; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSSE3-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSE41: # %bb.0:
; SSE41-NEXT: # kill: def $ecx killed $ecx def $rcx
; SSE41-NEXT: # kill: def $edx killed $edx def $rdx
; SSE41-NEXT: # kill: def $esi killed $esi def $rsi
; SSE41-NEXT: # kill: def $edi killed $edi def $rdi
; SSE41-NEXT: andl $3, %edi
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: andl $3, %esi
; SSE41-NEXT: andl $3, %edx
; SSE41-NEXT: andl $3, %ecx
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; AVX: # %bb.0:
; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
; AVX-NEXT: # kill: def $edx killed $edx def $rdx
; AVX-NEXT: # kill: def $esi killed $esi def $rsi
; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $3, %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: andl $3, %esi
; AVX-NEXT: andl $3, %edx
; AVX-NEXT: andl $3, %ecx
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; AVX-NEXT: retq
%x0 = extractelement <4 x float> %x, i32 %i0
%x1 = extractelement <4 x float> %x, i32 %i1
%x2 = extractelement <4 x float> %x, i32 %i2
%x3 = extractelement <4 x float> %x, i32 %i3
%r0 = insertelement <4 x float> undef, float %x0, i32 0
%r1 = insertelement <4 x float> %r0, float %x1, i32 1
%r2 = insertelement <4 x float> %r1, float %x2, i32 2
%r3 = insertelement <4 x float> %r2, float %x3, i32 3
ret <4 x float> %r3
}
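; Same pattern for <4 x i32>: pre-SSE4.1 targets go through movss/unpcklps,
; while SSE4.1/AVX use pinsrd with a scaled stack address.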
define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE2-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSE2: # %bb.0:
; SSE2-NEXT: # kill: def $ecx killed $ecx def $rcx
; SSE2-NEXT: # kill: def $edx killed $edx def $rdx
; SSE2-NEXT: # kill: def $esi killed $esi def $rsi
; SSE2-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-NEXT: andl $3, %edi
; SSE2-NEXT: andl $3, %esi
; SSE2-NEXT: andl $3, %edx
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: andl $3, %ecx
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: # kill: def $ecx killed $ecx def $rcx
; SSSE3-NEXT: # kill: def $edx killed $edx def $rdx
; SSSE3-NEXT: # kill: def $esi killed $esi def $rsi
; SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSSE3-NEXT: andl $3, %edi
; SSSE3-NEXT: andl $3, %esi
; SSSE3-NEXT: andl $3, %edx
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: andl $3, %ecx
; SSSE3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSE41: # %bb.0:
; SSE41-NEXT: # kill: def $ecx killed $ecx def $rcx
; SSE41-NEXT: # kill: def $edx killed $edx def $rdx
; SSE41-NEXT: # kill: def $esi killed $esi def $rsi
; SSE41-NEXT: # kill: def $edi killed $edi def $rdi
; SSE41-NEXT: andl $3, %edi
; SSE41-NEXT: andl $3, %esi
; SSE41-NEXT: andl $3, %edx
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: andl $3, %ecx
; SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: pinsrd $1, -24(%rsp,%rsi,4), %xmm0
; SSE41-NEXT: pinsrd $2, -24(%rsp,%rdx,4), %xmm0
; SSE41-NEXT: pinsrd $3, -24(%rsp,%rcx,4), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; AVX: # %bb.0:
; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
; AVX-NEXT: # kill: def $edx killed $edx def $rdx
; AVX-NEXT: # kill: def $esi killed $esi def $rsi
; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $3, %edi
; AVX-NEXT: andl $3, %esi
; AVX-NEXT: andl $3, %edx
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: andl $3, %ecx
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vpinsrd $1, -24(%rsp,%rsi,4), %xmm0, %xmm0
; AVX-NEXT: vpinsrd $2, -24(%rsp,%rdx,4), %xmm0, %xmm0
; AVX-NEXT: vpinsrd $3, -24(%rsp,%rcx,4), %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <4 x i32> %x, i32 %i0
%x1 = extractelement <4 x i32> %x, i32 %i1
%x2 = extractelement <4 x i32> %x, i32 %i2
%x3 = extractelement <4 x i32> %x, i32 %i3
%r0 = insertelement <4 x i32> undef, i32 %x0, i32 0
%r1 = insertelement <4 x i32> %r0, i32 %x1, i32 1
%r2 = insertelement <4 x i32> %r1, i32 %x2, i32 2
%r3 = insertelement <4 x i32> %r2, i32 %x3, i32 3
ret <4 x i32> %r3
}
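; Eight i16 indices: the first six arrive in registers and the last two on the
; stack (movzwl loads). SSE2/SSSE3 assemble the result with a punpck tree,
; while SSE4.1/AVX use pinsrw from the spill slot.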
define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
; SSE2-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSE2: # %bb.0:
; SSE2-NEXT: # kill: def $r9d killed $r9d def $r9
; SSE2-NEXT: # kill: def $r8d killed $r8d def $r8
; SSE2-NEXT: # kill: def $ecx killed $ecx def $rcx
; SSE2-NEXT: # kill: def $edx killed $edx def $rdx
; SSE2-NEXT: # kill: def $esi killed $esi def $rsi
; SSE2-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-NEXT: movzwl {{[0-9]+}}(%rsp), %r10d
; SSE2-NEXT: andl $7, %r10d
; SSE2-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $7, %eax
; SSE2-NEXT: andl $7, %edi
; SSE2-NEXT: andl $7, %esi
; SSE2-NEXT: andl $7, %edx
; SSE2-NEXT: andl $7, %ecx
; SSE2-NEXT: andl $7, %r8d
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: andl $7, %r9d
; SSE2-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
; SSE2-NEXT: movd %ecx, %xmm0
; SSE2-NEXT: movzwl -24(%rsp,%rdx,2), %ecx
; SSE2-NEXT: movd %ecx, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %ecx
; SSE2-NEXT: movd %ecx, %xmm2
; SSE2-NEXT: movzwl -24(%rsp,%rdi,2), %ecx
; SSE2-NEXT: movd %ecx, %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movzwl -24(%rsp,%r9,2), %ecx
; SSE2-NEXT: movd %ecx, %xmm1
; SSE2-NEXT: movzwl -24(%rsp,%r8,2), %ecx
; SSE2-NEXT: movd %ecx, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE2-NEXT: movzwl -24(%rsp,%rax,2), %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: movzwl -24(%rsp,%r10,2), %eax
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSSE3: # %bb.0:
; SSSE3-NEXT: # kill: def $r9d killed $r9d def $r9
; SSSE3-NEXT: # kill: def $r8d killed $r8d def $r8
; SSSE3-NEXT: # kill: def $ecx killed $ecx def $rcx
; SSSE3-NEXT: # kill: def $edx killed $edx def $rdx
; SSSE3-NEXT: # kill: def $esi killed $esi def $rsi
; SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSSE3-NEXT: movzwl {{[0-9]+}}(%rsp), %r10d
; SSSE3-NEXT: andl $7, %r10d
; SSSE3-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $7, %eax
; SSSE3-NEXT: andl $7, %edi
; SSSE3-NEXT: andl $7, %esi
; SSSE3-NEXT: andl $7, %edx
; SSSE3-NEXT: andl $7, %ecx
; SSSE3-NEXT: andl $7, %r8d
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: andl $7, %r9d
; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
; SSSE3-NEXT: movd %ecx, %xmm0
; SSSE3-NEXT: movzwl -24(%rsp,%rdx,2), %ecx
; SSSE3-NEXT: movd %ecx, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %ecx
; SSSE3-NEXT: movd %ecx, %xmm2
; SSSE3-NEXT: movzwl -24(%rsp,%rdi,2), %ecx
; SSSE3-NEXT: movd %ecx, %xmm0
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: movzwl -24(%rsp,%r9,2), %ecx
; SSSE3-NEXT: movd %ecx, %xmm1
; SSSE3-NEXT: movzwl -24(%rsp,%r8,2), %ecx
; SSSE3-NEXT: movd %ecx, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax
; SSSE3-NEXT: movd %eax, %xmm1
; SSSE3-NEXT: movzwl -24(%rsp,%r10,2), %eax
; SSSE3-NEXT: movd %eax, %xmm3
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSE41: # %bb.0:
; SSE41-NEXT: # kill: def $r9d killed $r9d def $r9
; SSE41-NEXT: # kill: def $r8d killed $r8d def $r8
; SSE41-NEXT: # kill: def $ecx killed $ecx def $rcx
; SSE41-NEXT: # kill: def $edx killed $edx def $rdx
; SSE41-NEXT: # kill: def $esi killed $esi def $rsi
; SSE41-NEXT: # kill: def $edi killed $edi def $rdi
; SSE41-NEXT: movzwl {{[0-9]+}}(%rsp), %r10d
; SSE41-NEXT: andl $7, %r10d
; SSE41-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
; SSE41-NEXT: andl $7, %eax
; SSE41-NEXT: andl $7, %edi
; SSE41-NEXT: andl $7, %esi
; SSE41-NEXT: andl $7, %edx
; SSE41-NEXT: andl $7, %ecx
; SSE41-NEXT: andl $7, %r8d
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: andl $7, %r9d
; SSE41-NEXT: movzwl -24(%rsp,%rdi,2), %edi
; SSE41-NEXT: movd %edi, %xmm0
; SSE41-NEXT: pinsrw $1, -24(%rsp,%rsi,2), %xmm0
; SSE41-NEXT: pinsrw $2, -24(%rsp,%rdx,2), %xmm0
; SSE41-NEXT: pinsrw $3, -24(%rsp,%rcx,2), %xmm0
; SSE41-NEXT: pinsrw $4, -24(%rsp,%r8,2), %xmm0
; SSE41-NEXT: pinsrw $5, -24(%rsp,%r9,2), %xmm0
; SSE41-NEXT: pinsrw $6, -24(%rsp,%rax,2), %xmm0
; SSE41-NEXT: pinsrw $7, -24(%rsp,%r10,2), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; AVX: # %bb.0:
; AVX-NEXT: # kill: def $r9d killed $r9d def $r9
; AVX-NEXT: # kill: def $r8d killed $r8d def $r8
; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
; AVX-NEXT: # kill: def $edx killed $edx def $rdx
; AVX-NEXT: # kill: def $esi killed $esi def $rsi
; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: movzwl {{[0-9]+}}(%rsp), %r10d
; AVX-NEXT: andl $7, %r10d
; AVX-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
; AVX-NEXT: andl $7, %eax
; AVX-NEXT: andl $7, %edi
; AVX-NEXT: andl $7, %esi
; AVX-NEXT: andl $7, %edx
; AVX-NEXT: andl $7, %ecx
; AVX-NEXT: andl $7, %r8d
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: andl $7, %r9d
; AVX-NEXT: movzwl -24(%rsp,%rdi,2), %edi
; AVX-NEXT: vmovd %edi, %xmm0
; AVX-NEXT: vpinsrw $1, -24(%rsp,%rsi,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $2, -24(%rsp,%rdx,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $4, -24(%rsp,%r8,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $5, -24(%rsp,%r9,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $6, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $7, -24(%rsp,%r10,2), %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <8 x i16> %x, i16 %i0
%x1 = extractelement <8 x i16> %x, i16 %i1
%x2 = extractelement <8 x i16> %x, i16 %i2
%x3 = extractelement <8 x i16> %x, i16 %i3
%x4 = extractelement <8 x i16> %x, i16 %i4
%x5 = extractelement <8 x i16> %x, i16 %i5
%x6 = extractelement <8 x i16> %x, i16 %i6
%x7 = extractelement <8 x i16> %x, i16 %i7
%r0 = insertelement <8 x i16> undef, i16 %x0, i32 0
%r1 = insertelement <8 x i16> %r0, i16 %x1, i32 1
%r2 = insertelement <8 x i16> %r1, i16 %x2, i32 2
%r3 = insertelement <8 x i16> %r2, i16 %x3, i32 3
%r4 = insertelement <8 x i16> %r3, i16 %x4, i32 4
%r5 = insertelement <8 x i16> %r4, i16 %x5, i32 5
%r6 = insertelement <8 x i16> %r5, i16 %x6, i32 6
%r7 = insertelement <8 x i16> %r6, i16 %x7, i32 7
ret <8 x i16> %r7
}
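; Sixteen i8 indices: only the first six arrive in registers, the remaining
; ten are loaded from the stack with movzbl. Every index is masked with $15
; before indexing the spilled vector.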
define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %i0, i8 %i1, i8 %i2, i8 %i3, i8 %i4, i8 %i5, i8 %i6, i8 %i7, i8 %i8, i8 %i9, i8 %i10, i8 %i11, i8 %i12, i8 %i13, i8 %i14, i8 %i15) nounwind {
; SSE2-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE2: # %bb.0:
; SSE2-NEXT: # kill: def $r9d killed $r9d def $r9
; SSE2-NEXT: # kill: def $r8d killed $r8d def $r8
; SSE2-NEXT: # kill: def $ecx killed $ecx def $rcx
; SSE2-NEXT: # kill: def $edx killed $edx def $rdx
; SSE2-NEXT: # kill: def $esi killed $esi def $rsi
; SSE2-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movzbl -24(%rsp,%rax), %eax
; SSE2-NEXT: movd %eax, %xmm8
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
; SSE2-NEXT: movzbl -24(%rsp,%rax), %eax
; SSE2-NEXT: movd %eax, %xmm15
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
; SSE2-NEXT: movzbl -24(%rsp,%rax), %eax
; SSE2-NEXT: movd %eax, %xmm9
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
; SSE2-NEXT: movzbl -24(%rsp,%rax), %eax
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
; SSE2-NEXT: movzbl -24(%rsp,%rax), %eax
; SSE2-NEXT: movd %eax, %xmm10
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
; SSE2-NEXT: movzbl -24(%rsp,%rax), %eax
; SSE2-NEXT: movd %eax, %xmm7
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
; SSE2-NEXT: movzbl -24(%rsp,%rax), %eax
; SSE2-NEXT: movd %eax, %xmm11
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
; SSE2-NEXT: movzbl -24(%rsp,%rax), %eax
; SSE2-NEXT: movd %eax, %xmm6
; SSE2-NEXT: andl $15, %ecx
; SSE2-NEXT: movzbl -24(%rsp,%rcx), %eax
; SSE2-NEXT: movd %eax, %xmm12
; SSE2-NEXT: andl $15, %edx
; SSE2-NEXT: movzbl -24(%rsp,%rdx), %eax
; SSE2-NEXT: movd %eax, %xmm5
; SSE2-NEXT: andl $15, %esi
; SSE2-NEXT: movzbl -24(%rsp,%rsi), %eax
; SSE2-NEXT: movd %eax, %xmm13
; SSE2-NEXT: andl $15, %edi
; SSE2-NEXT: movzbl -24(%rsp,%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: andl $15, %r9d
; SSE2-NEXT: movzbl -24(%rsp,%r9), %eax
; SSE2-NEXT: movd %eax, %xmm14
; SSE2-NEXT: andl $15, %r8d
; SSE2-NEXT: movzbl -24(%rsp,%r8), %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
; SSE2-NEXT: movzbl -24(%rsp,%rax), %eax
; SSE2-NEXT: movd %eax, %xmm4
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
; SSE2-NEXT: movzbl -24(%rsp,%rax), %eax
; SSE2-NEXT: movd %eax, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3],xmm0[4],xmm13[4],xmm0[5],xmm13[5],xmm0[6],xmm13[6],xmm0[7],xmm13[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSSE3: # %bb.0:
; SSSE3-NEXT: # kill: def $r9d killed $r9d def $r9
; SSSE3-NEXT: # kill: def $r8d killed $r8d def $r8
; SSSE3-NEXT: # kill: def $ecx killed $ecx def $rcx
; SSSE3-NEXT: # kill: def $edx killed $edx def $rdx
; SSSE3-NEXT: # kill: def $esi killed $esi def $rsi
; SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
; SSSE3-NEXT: movd %eax, %xmm8
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
; SSSE3-NEXT: movd %eax, %xmm15
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
; SSSE3-NEXT: movd %eax, %xmm9
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
; SSSE3-NEXT: movd %eax, %xmm3
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
; SSSE3-NEXT: movd %eax, %xmm10
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
; SSSE3-NEXT: movd %eax, %xmm7
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
; SSSE3-NEXT: movd %eax, %xmm11
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
; SSSE3-NEXT: movd %eax, %xmm6
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: movzbl -24(%rsp,%rcx), %eax
; SSSE3-NEXT: movd %eax, %xmm12
; SSSE3-NEXT: andl $15, %edx
; SSSE3-NEXT: movzbl -24(%rsp,%rdx), %eax
; SSSE3-NEXT: movd %eax, %xmm5
; SSSE3-NEXT: andl $15, %esi
; SSSE3-NEXT: movzbl -24(%rsp,%rsi), %eax
; SSSE3-NEXT: movd %eax, %xmm13
; SSSE3-NEXT: andl $15, %edi
; SSSE3-NEXT: movzbl -24(%rsp,%rdi), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: andl $15, %r9d
; SSSE3-NEXT: movzbl -24(%rsp,%r9), %eax
; SSSE3-NEXT: movd %eax, %xmm14
; SSSE3-NEXT: andl $15, %r8d
; SSSE3-NEXT: movzbl -24(%rsp,%r8), %eax
; SSSE3-NEXT: movd %eax, %xmm1
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
; SSSE3-NEXT: movd %eax, %xmm4
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
; SSSE3-NEXT: movd %eax, %xmm2
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3],xmm0[4],xmm13[4],xmm0[5],xmm13[5],xmm0[6],xmm13[6],xmm0[7],xmm13[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE41: # %bb.0:
; SSE41-NEXT: # kill: def $r9d killed $r9d def $r9
; SSE41-NEXT: # kill: def $r8d killed $r8d def $r8
; SSE41-NEXT: # kill: def $ecx killed $ecx def $rcx
; SSE41-NEXT: # kill: def $edx killed $edx def $rdx
; SSE41-NEXT: # kill: def $esi killed $esi def $rsi
; SSE41-NEXT: # kill: def $edi killed $edi def $rdi
; SSE41-NEXT: andl $15, %edi
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movzbl -24(%rsp,%rdi), %eax
; SSE41-NEXT: movd %eax, %xmm0
; SSE41-NEXT: andl $15, %esi
; SSE41-NEXT: pinsrb $1, -24(%rsp,%rsi), %xmm0
; SSE41-NEXT: andl $15, %edx
; SSE41-NEXT: pinsrb $2, -24(%rsp,%rdx), %xmm0
; SSE41-NEXT: andl $15, %ecx
; SSE41-NEXT: pinsrb $3, -24(%rsp,%rcx), %xmm0
; SSE41-NEXT: andl $15, %r8d
; SSE41-NEXT: pinsrb $4, -24(%rsp,%r8), %xmm0
; SSE41-NEXT: andl $15, %r9d
; SSE41-NEXT: pinsrb $5, -24(%rsp,%r9), %xmm0
; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE41-NEXT: andl $15, %eax
; SSE41-NEXT: pinsrb $6, -24(%rsp,%rax), %xmm0
; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE41-NEXT: andl $15, %eax
; SSE41-NEXT: pinsrb $7, -24(%rsp,%rax), %xmm0
; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE41-NEXT: andl $15, %eax
; SSE41-NEXT: pinsrb $8, -24(%rsp,%rax), %xmm0
; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE41-NEXT: andl $15, %eax
; SSE41-NEXT: pinsrb $9, -24(%rsp,%rax), %xmm0
; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE41-NEXT: andl $15, %eax
; SSE41-NEXT: pinsrb $10, -24(%rsp,%rax), %xmm0
; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE41-NEXT: andl $15, %eax
; SSE41-NEXT: pinsrb $11, -24(%rsp,%rax), %xmm0
; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE41-NEXT: andl $15, %eax
; SSE41-NEXT: pinsrb $12, -24(%rsp,%rax), %xmm0
; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE41-NEXT: andl $15, %eax
; SSE41-NEXT: pinsrb $13, -24(%rsp,%rax), %xmm0
; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE41-NEXT: andl $15, %eax
; SSE41-NEXT: pinsrb $14, -24(%rsp,%rax), %xmm0
; SSE41-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE41-NEXT: andl $15, %eax
; SSE41-NEXT: pinsrb $15, -24(%rsp,%rax), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; AVX: # %bb.0:
; AVX-NEXT: # kill: def $r9d killed $r9d def $r9
; AVX-NEXT: # kill: def $r8d killed $r8d def $r8
; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
; AVX-NEXT: # kill: def $edx killed $edx def $rdx
; AVX-NEXT: # kill: def $esi killed $esi def $rsi
; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $15, %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movzbl -24(%rsp,%rdi), %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: andl $15, %esi
; AVX-NEXT: vpinsrb $1, -24(%rsp,%rsi), %xmm0, %xmm0
; AVX-NEXT: andl $15, %edx
; AVX-NEXT: vpinsrb $2, -24(%rsp,%rdx), %xmm0, %xmm0
; AVX-NEXT: andl $15, %ecx
; AVX-NEXT: vpinsrb $3, -24(%rsp,%rcx), %xmm0, %xmm0
; AVX-NEXT: andl $15, %r8d
; AVX-NEXT: vpinsrb $4, -24(%rsp,%r8), %xmm0, %xmm0
; AVX-NEXT: andl $15, %r9d
; AVX-NEXT: vpinsrb $5, -24(%rsp,%r9), %xmm0, %xmm0
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $6, -24(%rsp,%rax), %xmm0, %xmm0
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $7, -24(%rsp,%rax), %xmm0, %xmm0
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $8, -24(%rsp,%rax), %xmm0, %xmm0
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $9, -24(%rsp,%rax), %xmm0, %xmm0
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $10, -24(%rsp,%rax), %xmm0, %xmm0
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $11, -24(%rsp,%rax), %xmm0, %xmm0
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $12, -24(%rsp,%rax), %xmm0, %xmm0
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $13, -24(%rsp,%rax), %xmm0, %xmm0
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $14, -24(%rsp,%rax), %xmm0, %xmm0
; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $15, -24(%rsp,%rax), %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <16 x i8> %x, i8 %i0
%x1 = extractelement <16 x i8> %x, i8 %i1
%x2 = extractelement <16 x i8> %x, i8 %i2
%x3 = extractelement <16 x i8> %x, i8 %i3
%x4 = extractelement <16 x i8> %x, i8 %i4
%x5 = extractelement <16 x i8> %x, i8 %i5
%x6 = extractelement <16 x i8> %x, i8 %i6
%x7 = extractelement <16 x i8> %x, i8 %i7
%x8 = extractelement <16 x i8> %x, i8 %i8
%x9 = extractelement <16 x i8> %x, i8 %i9
%x10 = extractelement <16 x i8> %x, i8 %i10
%x11 = extractelement <16 x i8> %x, i8 %i11
%x12 = extractelement <16 x i8> %x, i8 %i12
%x13 = extractelement <16 x i8> %x, i8 %i13
%x14 = extractelement <16 x i8> %x, i8 %i14
%x15 = extractelement <16 x i8> %x, i8 %i15
%r0 = insertelement <16 x i8> undef, i8 %x0 , i32 0
%r1 = insertelement <16 x i8> %r0 , i8 %x1 , i32 1
%r2 = insertelement <16 x i8> %r1 , i8 %x2 , i32 2
%r3 = insertelement <16 x i8> %r2 , i8 %x3 , i32 3
%r4 = insertelement <16 x i8> %r3 , i8 %x4 , i32 4
%r5 = insertelement <16 x i8> %r4 , i8 %x5 , i32 5
%r6 = insertelement <16 x i8> %r5 , i8 %x6 , i32 6
%r7 = insertelement <16 x i8> %r6 , i8 %x7 , i32 7
%r8 = insertelement <16 x i8> %r7 , i8 %x8 , i32 8
%r9 = insertelement <16 x i8> %r8 , i8 %x9 , i32 9
%r10 = insertelement <16 x i8> %r9 , i8 %x10, i32 10
%r11 = insertelement <16 x i8> %r10, i8 %x11, i32 11
%r12 = insertelement <16 x i8> %r11, i8 %x12, i32 12
%r13 = insertelement <16 x i8> %r12, i8 %x13, i32 13
%r14 = insertelement <16 x i8> %r13, i8 %x14, i32 14
%r15 = insertelement <16 x i8> %r14, i8 %x15, i32 15
ret <16 x i8> %r15
}
;
; Unary shuffle indices from memory
;
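; Note (descriptive summary of the checks below): each index is loaded from the
; pointer argument, masked to the valid lane range (e.g. $3 for v4i32, $15 for
; v16i8), and the source vector is spilled to the stack so lanes can be gathered
; with scalar loads (pinsrd/pinsrb on SSE4.1/AVX, movd + punpck chains on
; SSE2/SSSE3).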
define <4 x i32> @mem_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32* %i) nounwind {
; SSE2-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movl (%rdi), %eax
; SSE2-NEXT: movl 4(%rdi), %ecx
; SSE2-NEXT: andl $3, %eax
; SSE2-NEXT: andl $3, %ecx
; SSE2-NEXT: movl 8(%rdi), %edx
; SSE2-NEXT: andl $3, %edx
; SSE2-NEXT: movl 12(%rdi), %esi
; SSE2-NEXT: andl $3, %esi
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movl (%rdi), %eax
; SSSE3-NEXT: movl 4(%rdi), %ecx
; SSSE3-NEXT: andl $3, %eax
; SSSE3-NEXT: andl $3, %ecx
; SSSE3-NEXT: movl 8(%rdi), %edx
; SSSE3-NEXT: andl $3, %edx
; SSSE3-NEXT: movl 12(%rdi), %esi
; SSSE3-NEXT: andl $3, %esi
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movl (%rdi), %eax
; SSE41-NEXT: movl 4(%rdi), %ecx
; SSE41-NEXT: andl $3, %eax
; SSE41-NEXT: andl $3, %ecx
; SSE41-NEXT: movl 8(%rdi), %edx
; SSE41-NEXT: andl $3, %edx
; SSE41-NEXT: movl 12(%rdi), %esi
; SSE41-NEXT: andl $3, %esi
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: pinsrd $1, -24(%rsp,%rcx,4), %xmm0
; SSE41-NEXT: pinsrd $2, -24(%rsp,%rdx,4), %xmm0
; SSE41-NEXT: pinsrd $3, -24(%rsp,%rsi,4), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
; AVX: # %bb.0:
; AVX-NEXT: movl (%rdi), %eax
; AVX-NEXT: movl 4(%rdi), %ecx
; AVX-NEXT: andl $3, %eax
; AVX-NEXT: andl $3, %ecx
; AVX-NEXT: movl 8(%rdi), %edx
; AVX-NEXT: andl $3, %edx
; AVX-NEXT: movl 12(%rdi), %esi
; AVX-NEXT: andl $3, %esi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vpinsrd $1, -24(%rsp,%rcx,4), %xmm0, %xmm0
; AVX-NEXT: vpinsrd $2, -24(%rsp,%rdx,4), %xmm0, %xmm0
; AVX-NEXT: vpinsrd $3, -24(%rsp,%rsi,4), %xmm0, %xmm0
; AVX-NEXT: retq
%p0 = getelementptr inbounds i32, i32* %i, i64 0
%p1 = getelementptr inbounds i32, i32* %i, i64 1
%p2 = getelementptr inbounds i32, i32* %i, i64 2
%p3 = getelementptr inbounds i32, i32* %i, i64 3
%i0 = load i32, i32* %p0, align 4
%i1 = load i32, i32* %p1, align 4
%i2 = load i32, i32* %p2, align 4
%i3 = load i32, i32* %p3, align 4
%x0 = extractelement <4 x i32> %x, i32 %i0
%x1 = extractelement <4 x i32> %x, i32 %i1
%x2 = extractelement <4 x i32> %x, i32 %i2
%x3 = extractelement <4 x i32> %x, i32 %i3
%r0 = insertelement <4 x i32> undef, i32 %x0, i32 0
%r1 = insertelement <4 x i32> %r0, i32 %x1, i32 1
%r2 = insertelement <4 x i32> %r1, i32 %x2, i32 2
%r3 = insertelement <4 x i32> %r2, i32 %x3, i32 3
ret <4 x i32> %r3
}
define <16 x i8> @mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8* %i) nounwind {
; SSE2-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %r15
; SSE2-NEXT: pushq %r14
; SSE2-NEXT: pushq %r13
; SSE2-NEXT: pushq %r12
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: movzbl (%rdi), %eax
; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; SSE2-NEXT: movzbl 1(%rdi), %r9d
; SSE2-NEXT: movzbl 2(%rdi), %r10d
; SSE2-NEXT: movzbl 3(%rdi), %r11d
; SSE2-NEXT: movzbl 4(%rdi), %r14d
; SSE2-NEXT: movzbl 5(%rdi), %r15d
; SSE2-NEXT: movzbl 6(%rdi), %r12d
; SSE2-NEXT: movzbl 7(%rdi), %r13d
; SSE2-NEXT: movzbl 8(%rdi), %ebx
; SSE2-NEXT: movzbl 9(%rdi), %r8d
; SSE2-NEXT: movzbl 10(%rdi), %ecx
; SSE2-NEXT: movzbl 11(%rdi), %edx
; SSE2-NEXT: movzbl 12(%rdi), %esi
; SSE2-NEXT: movzbl 13(%rdi), %ebp
; SSE2-NEXT: movzbl 14(%rdi), %eax
; SSE2-NEXT: movzbl 15(%rdi), %edi
; SSE2-NEXT: andl $15, %edi
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movzbl -24(%rsp,%rdi), %edi
; SSE2-NEXT: movd %edi, %xmm8
; SSE2-NEXT: andl $15, %eax
; SSE2-NEXT: movzbl -24(%rsp,%rax), %eax
; SSE2-NEXT: movd %eax, %xmm15
; SSE2-NEXT: andl $15, %ebp
; SSE2-NEXT: movzbl -24(%rsp,%rbp), %eax
; SSE2-NEXT: movd %eax, %xmm9
; SSE2-NEXT: andl $15, %esi
; SSE2-NEXT: movzbl -24(%rsp,%rsi), %eax
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: andl $15, %edx
; SSE2-NEXT: movzbl -24(%rsp,%rdx), %eax
; SSE2-NEXT: movd %eax, %xmm10
; SSE2-NEXT: andl $15, %ecx
; SSE2-NEXT: movzbl -24(%rsp,%rcx), %eax
; SSE2-NEXT: movd %eax, %xmm7
; SSE2-NEXT: andl $15, %r8d
; SSE2-NEXT: movzbl -24(%rsp,%r8), %eax
; SSE2-NEXT: movd %eax, %xmm11
; SSE2-NEXT: andl $15, %ebx
; SSE2-NEXT: movzbl -24(%rsp,%rbx), %eax
; SSE2-NEXT: movd %eax, %xmm6
; SSE2-NEXT: andl $15, %r13d
; SSE2-NEXT: movzbl -24(%rsp,%r13), %eax
; SSE2-NEXT: movd %eax, %xmm12
; SSE2-NEXT: andl $15, %r12d
; SSE2-NEXT: movzbl -24(%rsp,%r12), %eax
; SSE2-NEXT: movd %eax, %xmm5
; SSE2-NEXT: andl $15, %r15d
; SSE2-NEXT: movzbl -24(%rsp,%r15), %eax
; SSE2-NEXT: movd %eax, %xmm13
; SSE2-NEXT: andl $15, %r14d
; SSE2-NEXT: movzbl -24(%rsp,%r14), %eax
; SSE2-NEXT: movd %eax, %xmm4
; SSE2-NEXT: andl $15, %r11d
; SSE2-NEXT: movzbl -24(%rsp,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm14
; SSE2-NEXT: andl $15, %r10d
; SSE2-NEXT: movzbl -24(%rsp,%r10), %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: andl $15, %r9d
; SSE2-NEXT: movzbl -24(%rsp,%r9), %eax
; SSE2-NEXT: movd %eax, %xmm2
; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; SSE2-NEXT: andl $15, %eax
; SSE2-NEXT: movzbl -24(%rsp,%rax), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %r12
; SSE2-NEXT: popq %r13
; SSE2-NEXT: popq %r14
; SSE2-NEXT: popq %r15
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSSE3: # %bb.0:
; SSSE3-NEXT: pushq %rbp
; SSSE3-NEXT: pushq %r15
; SSSE3-NEXT: pushq %r14
; SSSE3-NEXT: pushq %r13
; SSSE3-NEXT: pushq %r12
; SSSE3-NEXT: pushq %rbx
; SSSE3-NEXT: movzbl (%rdi), %eax
; SSSE3-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; SSSE3-NEXT: movzbl 1(%rdi), %r9d
; SSSE3-NEXT: movzbl 2(%rdi), %r10d
; SSSE3-NEXT: movzbl 3(%rdi), %r11d
; SSSE3-NEXT: movzbl 4(%rdi), %r14d
; SSSE3-NEXT: movzbl 5(%rdi), %r15d
; SSSE3-NEXT: movzbl 6(%rdi), %r12d
; SSSE3-NEXT: movzbl 7(%rdi), %r13d
; SSSE3-NEXT: movzbl 8(%rdi), %ebx
; SSSE3-NEXT: movzbl 9(%rdi), %r8d
; SSSE3-NEXT: movzbl 10(%rdi), %ecx
; SSSE3-NEXT: movzbl 11(%rdi), %edx
; SSSE3-NEXT: movzbl 12(%rdi), %esi
; SSSE3-NEXT: movzbl 13(%rdi), %ebp
; SSSE3-NEXT: movzbl 14(%rdi), %eax
; SSSE3-NEXT: movzbl 15(%rdi), %edi
; SSSE3-NEXT: andl $15, %edi
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movzbl -24(%rsp,%rdi), %edi
; SSSE3-NEXT: movd %edi, %xmm8
; SSSE3-NEXT: andl $15, %eax
; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
; SSSE3-NEXT: movd %eax, %xmm15
; SSSE3-NEXT: andl $15, %ebp
; SSSE3-NEXT: movzbl -24(%rsp,%rbp), %eax
; SSSE3-NEXT: movd %eax, %xmm9
; SSSE3-NEXT: andl $15, %esi
; SSSE3-NEXT: movzbl -24(%rsp,%rsi), %eax
; SSSE3-NEXT: movd %eax, %xmm3
; SSSE3-NEXT: andl $15, %edx
; SSSE3-NEXT: movzbl -24(%rsp,%rdx), %eax
; SSSE3-NEXT: movd %eax, %xmm10
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: movzbl -24(%rsp,%rcx), %eax
; SSSE3-NEXT: movd %eax, %xmm7
; SSSE3-NEXT: andl $15, %r8d
; SSSE3-NEXT: movzbl -24(%rsp,%r8), %eax
; SSSE3-NEXT: movd %eax, %xmm11
; SSSE3-NEXT: andl $15, %ebx
; SSSE3-NEXT: movzbl -24(%rsp,%rbx), %eax
; SSSE3-NEXT: movd %eax, %xmm6
; SSSE3-NEXT: andl $15, %r13d
; SSSE3-NEXT: movzbl -24(%rsp,%r13), %eax
; SSSE3-NEXT: movd %eax, %xmm12
; SSSE3-NEXT: andl $15, %r12d
; SSSE3-NEXT: movzbl -24(%rsp,%r12), %eax
; SSSE3-NEXT: movd %eax, %xmm5
; SSSE3-NEXT: andl $15, %r15d
; SSSE3-NEXT: movzbl -24(%rsp,%r15), %eax
; SSSE3-NEXT: movd %eax, %xmm13
; SSSE3-NEXT: andl $15, %r14d
; SSSE3-NEXT: movzbl -24(%rsp,%r14), %eax
; SSSE3-NEXT: movd %eax, %xmm4
; SSSE3-NEXT: andl $15, %r11d
; SSSE3-NEXT: movzbl -24(%rsp,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm14
; SSSE3-NEXT: andl $15, %r10d
; SSSE3-NEXT: movzbl -24(%rsp,%r10), %eax
; SSSE3-NEXT: movd %eax, %xmm1
; SSSE3-NEXT: andl $15, %r9d
; SSSE3-NEXT: movzbl -24(%rsp,%r9), %eax
; SSSE3-NEXT: movd %eax, %xmm2
; SSSE3-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; SSSE3-NEXT: andl $15, %eax
; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
; SSSE3-NEXT: popq %rbx
; SSSE3-NEXT: popq %r12
; SSSE3-NEXT: popq %r13
; SSSE3-NEXT: popq %r14
; SSSE3-NEXT: popq %r15
; SSSE3-NEXT: popq %rbp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE41: # %bb.0:
; SSE41-NEXT: pushq %rbp
; SSE41-NEXT: pushq %r15
; SSE41-NEXT: pushq %r14
; SSE41-NEXT: pushq %r13
; SSE41-NEXT: pushq %r12
; SSE41-NEXT: pushq %rbx
; SSE41-NEXT: movzbl (%rdi), %r9d
; SSE41-NEXT: andl $15, %r9d
; SSE41-NEXT: movzbl 1(%rdi), %ebx
; SSE41-NEXT: movzbl 2(%rdi), %eax
; SSE41-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; SSE41-NEXT: movzbl 3(%rdi), %r11d
; SSE41-NEXT: movzbl 4(%rdi), %r14d
; SSE41-NEXT: movzbl 5(%rdi), %r15d
; SSE41-NEXT: movzbl 6(%rdi), %r12d
; SSE41-NEXT: movzbl 7(%rdi), %r13d
; SSE41-NEXT: movzbl 8(%rdi), %r10d
; SSE41-NEXT: movzbl 9(%rdi), %r8d
; SSE41-NEXT: movzbl 10(%rdi), %ecx
; SSE41-NEXT: movzbl 11(%rdi), %edx
; SSE41-NEXT: movzbl 12(%rdi), %esi
; SSE41-NEXT: movzbl 13(%rdi), %ebp
; SSE41-NEXT: movzbl 14(%rdi), %eax
; SSE41-NEXT: movzbl 15(%rdi), %edi
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movzbl -24(%rsp,%r9), %r9d
; SSE41-NEXT: movd %r9d, %xmm0
; SSE41-NEXT: andl $15, %ebx
; SSE41-NEXT: pinsrb $1, -24(%rsp,%rbx), %xmm0
; SSE41-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
; SSE41-NEXT: andl $15, %ebx
; SSE41-NEXT: pinsrb $2, -24(%rsp,%rbx), %xmm0
; SSE41-NEXT: andl $15, %r11d
; SSE41-NEXT: pinsrb $3, -24(%rsp,%r11), %xmm0
; SSE41-NEXT: andl $15, %r14d
; SSE41-NEXT: pinsrb $4, -24(%rsp,%r14), %xmm0
; SSE41-NEXT: andl $15, %r15d
; SSE41-NEXT: pinsrb $5, -24(%rsp,%r15), %xmm0
; SSE41-NEXT: andl $15, %r12d
; SSE41-NEXT: pinsrb $6, -24(%rsp,%r12), %xmm0
; SSE41-NEXT: andl $15, %r13d
; SSE41-NEXT: pinsrb $7, -24(%rsp,%r13), %xmm0
; SSE41-NEXT: andl $15, %r10d
; SSE41-NEXT: pinsrb $8, -24(%rsp,%r10), %xmm0
; SSE41-NEXT: andl $15, %r8d
; SSE41-NEXT: pinsrb $9, -24(%rsp,%r8), %xmm0
; SSE41-NEXT: andl $15, %ecx
; SSE41-NEXT: pinsrb $10, -24(%rsp,%rcx), %xmm0
; SSE41-NEXT: andl $15, %edx
; SSE41-NEXT: pinsrb $11, -24(%rsp,%rdx), %xmm0
; SSE41-NEXT: andl $15, %esi
; SSE41-NEXT: pinsrb $12, -24(%rsp,%rsi), %xmm0
; SSE41-NEXT: andl $15, %ebp
; SSE41-NEXT: pinsrb $13, -24(%rsp,%rbp), %xmm0
; SSE41-NEXT: andl $15, %eax
; SSE41-NEXT: pinsrb $14, -24(%rsp,%rax), %xmm0
; SSE41-NEXT: andl $15, %edi
; SSE41-NEXT: pinsrb $15, -24(%rsp,%rdi), %xmm0
; SSE41-NEXT: popq %rbx
; SSE41-NEXT: popq %r12
; SSE41-NEXT: popq %r13
; SSE41-NEXT: popq %r14
; SSE41-NEXT: popq %r15
; SSE41-NEXT: popq %rbp
; SSE41-NEXT: retq
;
; AVX-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; AVX: # %bb.0:
; AVX-NEXT: pushq %rbp
; AVX-NEXT: pushq %r15
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %r13
; AVX-NEXT: pushq %r12
; AVX-NEXT: pushq %rbx
; AVX-NEXT: movzbl (%rdi), %r9d
; AVX-NEXT: andl $15, %r9d
; AVX-NEXT: movzbl 1(%rdi), %ebx
; AVX-NEXT: movzbl 2(%rdi), %eax
; AVX-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX-NEXT: movzbl 3(%rdi), %r11d
; AVX-NEXT: movzbl 4(%rdi), %r14d
; AVX-NEXT: movzbl 5(%rdi), %r15d
; AVX-NEXT: movzbl 6(%rdi), %r12d
; AVX-NEXT: movzbl 7(%rdi), %r13d
; AVX-NEXT: movzbl 8(%rdi), %r10d
; AVX-NEXT: movzbl 9(%rdi), %r8d
; AVX-NEXT: movzbl 10(%rdi), %ecx
; AVX-NEXT: movzbl 11(%rdi), %edx
; AVX-NEXT: movzbl 12(%rdi), %esi
; AVX-NEXT: movzbl 13(%rdi), %ebp
; AVX-NEXT: movzbl 14(%rdi), %eax
; AVX-NEXT: movzbl 15(%rdi), %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movzbl -24(%rsp,%r9), %r9d
; AVX-NEXT: vmovd %r9d, %xmm0
; AVX-NEXT: andl $15, %ebx
; AVX-NEXT: vpinsrb $1, -24(%rsp,%rbx), %xmm0, %xmm0
; AVX-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
; AVX-NEXT: andl $15, %ebx
; AVX-NEXT: vpinsrb $2, -24(%rsp,%rbx), %xmm0, %xmm0
; AVX-NEXT: andl $15, %r11d
; AVX-NEXT: vpinsrb $3, -24(%rsp,%r11), %xmm0, %xmm0
; AVX-NEXT: andl $15, %r14d
; AVX-NEXT: vpinsrb $4, -24(%rsp,%r14), %xmm0, %xmm0
; AVX-NEXT: andl $15, %r15d
; AVX-NEXT: vpinsrb $5, -24(%rsp,%r15), %xmm0, %xmm0
; AVX-NEXT: andl $15, %r12d
; AVX-NEXT: vpinsrb $6, -24(%rsp,%r12), %xmm0, %xmm0
; AVX-NEXT: andl $15, %r13d
; AVX-NEXT: vpinsrb $7, -24(%rsp,%r13), %xmm0, %xmm0
; AVX-NEXT: andl $15, %r10d
; AVX-NEXT: vpinsrb $8, -24(%rsp,%r10), %xmm0, %xmm0
; AVX-NEXT: andl $15, %r8d
; AVX-NEXT: vpinsrb $9, -24(%rsp,%r8), %xmm0, %xmm0
; AVX-NEXT: andl $15, %ecx
; AVX-NEXT: vpinsrb $10, -24(%rsp,%rcx), %xmm0, %xmm0
; AVX-NEXT: andl $15, %edx
; AVX-NEXT: vpinsrb $11, -24(%rsp,%rdx), %xmm0, %xmm0
; AVX-NEXT: andl $15, %esi
; AVX-NEXT: vpinsrb $12, -24(%rsp,%rsi), %xmm0, %xmm0
; AVX-NEXT: andl $15, %ebp
; AVX-NEXT: vpinsrb $13, -24(%rsp,%rbp), %xmm0, %xmm0
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $14, -24(%rsp,%rax), %xmm0, %xmm0
; AVX-NEXT: andl $15, %edi
; AVX-NEXT: vpinsrb $15, -24(%rsp,%rdi), %xmm0, %xmm0
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r12
; AVX-NEXT: popq %r13
; AVX-NEXT: popq %r14
; AVX-NEXT: popq %r15
; AVX-NEXT: popq %rbp
; AVX-NEXT: retq
%p0 = getelementptr inbounds i8, i8* %i, i64 0
%p1 = getelementptr inbounds i8, i8* %i, i64 1
%p2 = getelementptr inbounds i8, i8* %i, i64 2
%p3 = getelementptr inbounds i8, i8* %i, i64 3
%p4 = getelementptr inbounds i8, i8* %i, i64 4
%p5 = getelementptr inbounds i8, i8* %i, i64 5
%p6 = getelementptr inbounds i8, i8* %i, i64 6
%p7 = getelementptr inbounds i8, i8* %i, i64 7
%p8 = getelementptr inbounds i8, i8* %i, i64 8
%p9 = getelementptr inbounds i8, i8* %i, i64 9
%p10 = getelementptr inbounds i8, i8* %i, i64 10
%p11 = getelementptr inbounds i8, i8* %i, i64 11
%p12 = getelementptr inbounds i8, i8* %i, i64 12
%p13 = getelementptr inbounds i8, i8* %i, i64 13
%p14 = getelementptr inbounds i8, i8* %i, i64 14
%p15 = getelementptr inbounds i8, i8* %i, i64 15
%i0 = load i8, i8* %p0 , align 4
%i1 = load i8, i8* %p1 , align 4
%i2 = load i8, i8* %p2 , align 4
%i3 = load i8, i8* %p3 , align 4
%i4 = load i8, i8* %p4 , align 4
%i5 = load i8, i8* %p5 , align 4
%i6 = load i8, i8* %p6 , align 4
%i7 = load i8, i8* %p7 , align 4
%i8 = load i8, i8* %p8 , align 4
%i9 = load i8, i8* %p9 , align 4
%i10 = load i8, i8* %p10, align 4
%i11 = load i8, i8* %p11, align 4
%i12 = load i8, i8* %p12, align 4
%i13 = load i8, i8* %p13, align 4
%i14 = load i8, i8* %p14, align 4
%i15 = load i8, i8* %p15, align 4
%x0 = extractelement <16 x i8> %x, i8 %i0
%x1 = extractelement <16 x i8> %x, i8 %i1
%x2 = extractelement <16 x i8> %x, i8 %i2
%x3 = extractelement <16 x i8> %x, i8 %i3
%x4 = extractelement <16 x i8> %x, i8 %i4
%x5 = extractelement <16 x i8> %x, i8 %i5
%x6 = extractelement <16 x i8> %x, i8 %i6
%x7 = extractelement <16 x i8> %x, i8 %i7
%x8 = extractelement <16 x i8> %x, i8 %i8
%x9 = extractelement <16 x i8> %x, i8 %i9
%x10 = extractelement <16 x i8> %x, i8 %i10
%x11 = extractelement <16 x i8> %x, i8 %i11
%x12 = extractelement <16 x i8> %x, i8 %i12
%x13 = extractelement <16 x i8> %x, i8 %i13
%x14 = extractelement <16 x i8> %x, i8 %i14
%x15 = extractelement <16 x i8> %x, i8 %i15
%r0 = insertelement <16 x i8> undef, i8 %x0 , i32 0
%r1 = insertelement <16 x i8> %r0 , i8 %x1 , i32 1
%r2 = insertelement <16 x i8> %r1 , i8 %x2 , i32 2
%r3 = insertelement <16 x i8> %r2 , i8 %x3 , i32 3
%r4 = insertelement <16 x i8> %r3 , i8 %x4 , i32 4
%r5 = insertelement <16 x i8> %r4 , i8 %x5 , i32 5
%r6 = insertelement <16 x i8> %r5 , i8 %x6 , i32 6
%r7 = insertelement <16 x i8> %r6 , i8 %x7 , i32 7
%r8 = insertelement <16 x i8> %r7 , i8 %x8 , i32 8
%r9 = insertelement <16 x i8> %r8 , i8 %x9 , i32 9
%r10 = insertelement <16 x i8> %r9 , i8 %x10, i32 10
%r11 = insertelement <16 x i8> %r10, i8 %x11, i32 11
%r12 = insertelement <16 x i8> %r11, i8 %x12, i32 12
%r13 = insertelement <16 x i8> %r12, i8 %x13, i32 13
%r14 = insertelement <16 x i8> %r13, i8 %x14, i32 14
%r15 = insertelement <16 x i8> %r14, i8 %x15, i32 15
ret <16 x i8> %r15
}
;
; Binary shuffle indices from registers
;
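; These binary cases spill both source vectors to the stack, mask each index
; register to the element count (e.g. andl $3 / $7), and rebuild the result
; lane by lane from scalar loads of the selected stack slots.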
define <4 x float> @var_shuffle_v4f32_v4f32_x0yx_i32(<4 x float> %x, <4 x float> %y, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $ecx killed $ecx def $rcx
; SSE-NEXT: # kill: def $edx killed $edx def $rdx
; SSE-NEXT: # kill: def $edi killed $edi def $rdi
; SSE-NEXT: andl $3, %edi
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $3, %edx
; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $3, %ecx
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
; AVX: # %bb.0:
; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
; AVX-NEXT: # kill: def $edx killed $edx def $rdx
; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $3, %edi
; AVX-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
; AVX-NEXT: andl $3, %edx
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: andl $3, %ecx
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%x0 = extractelement <4 x float> %x, i32 %i0
%x1 = extractelement <4 x float> %x, i32 %i1
%y2 = extractelement <4 x float> %y, i32 %i2
%x3 = extractelement <4 x float> %x, i32 %i3
%r0 = insertelement <4 x float> undef, float %x0, i32 0
%r1 = insertelement <4 x float> %r0, float 0.0, i32 1
%r2 = insertelement <4 x float> %r1, float %y2, i32 2
%r3 = insertelement <4 x float> %r2, float %x3, i32 3
ret <4 x float> %r3
}
define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %y, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
; SSE2-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSE2: # %bb.0:
; SSE2-NEXT: # kill: def $r9d killed $r9d def $r9
; SSE2-NEXT: # kill: def $r8d killed $r8d def $r8
; SSE2-NEXT: # kill: def $ecx killed $ecx def $rcx
; SSE2-NEXT: # kill: def $edx killed $edx def $rdx
; SSE2-NEXT: # kill: def $esi killed $esi def $rsi
; SSE2-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-NEXT: andl $7, %edi
; SSE2-NEXT: andl $7, %esi
; SSE2-NEXT: andl $7, %edx
; SSE2-NEXT: andl $7, %ecx
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: andl $7, %r8d
; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: andl $7, %r9d
; SSE2-NEXT: movzwl -24(%rsp,%rcx,2), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: movzwl -40(%rsp,%rdx,2), %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %eax
; SSE2-NEXT: movd %eax, %xmm2
; SSE2-NEXT: movzwl -40(%rsp,%rdi,2), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movzwl -24(%rsp,%r9,2), %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: movzwl -40(%rsp,%r8,2), %eax
; SSE2-NEXT: movd %eax, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSSE3: # %bb.0:
; SSSE3-NEXT: # kill: def $r9d killed $r9d def $r9
; SSSE3-NEXT: # kill: def $r8d killed $r8d def $r8
; SSSE3-NEXT: # kill: def $ecx killed $ecx def $rcx
; SSSE3-NEXT: # kill: def $edx killed $edx def $rdx
; SSSE3-NEXT: # kill: def $esi killed $esi def $rsi
; SSSE3-NEXT: # kill: def $edi killed $edi def $rdi
; SSSE3-NEXT: andl $7, %edi
; SSSE3-NEXT: andl $7, %esi
; SSSE3-NEXT: andl $7, %edx
; SSSE3-NEXT: andl $7, %ecx
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: andl $7, %r8d
; SSSE3-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: andl $7, %r9d
; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: movzwl -40(%rsp,%rdx,2), %eax
; SSSE3-NEXT: movd %eax, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %eax
; SSSE3-NEXT: movd %eax, %xmm2
; SSSE3-NEXT: movzwl -40(%rsp,%rdi,2), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: movzwl -24(%rsp,%r9,2), %eax
; SSSE3-NEXT: movd %eax, %xmm1
; SSSE3-NEXT: movzwl -40(%rsp,%r8,2), %eax
; SSSE3-NEXT: movd %eax, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSE41: # %bb.0:
; SSE41-NEXT: # kill: def $r9d killed $r9d def $r9
; SSE41-NEXT: # kill: def $r8d killed $r8d def $r8
; SSE41-NEXT: # kill: def $ecx killed $ecx def $rcx
; SSE41-NEXT: # kill: def $edx killed $edx def $rdx
; SSE41-NEXT: # kill: def $esi killed $esi def $rsi
; SSE41-NEXT: # kill: def $edi killed $edi def $rdi
; SSE41-NEXT: andl $7, %edi
; SSE41-NEXT: andl $7, %esi
; SSE41-NEXT: andl $7, %edx
; SSE41-NEXT: andl $7, %ecx
; SSE41-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: andl $7, %r8d
; SSE41-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: andl $7, %r9d
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pinsrw $0, -40(%rsp,%rdi,2), %xmm0
; SSE41-NEXT: pinsrw $1, -24(%rsp,%rsi,2), %xmm0
; SSE41-NEXT: pinsrw $2, -40(%rsp,%rdx,2), %xmm0
; SSE41-NEXT: pinsrw $3, -24(%rsp,%rcx,2), %xmm0
; SSE41-NEXT: pinsrw $4, -40(%rsp,%r8,2), %xmm0
; SSE41-NEXT: pinsrw $5, -24(%rsp,%r9,2), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; AVX: # %bb.0:
; AVX-NEXT: # kill: def $r9d killed $r9d def $r9
; AVX-NEXT: # kill: def $r8d killed $r8d def $r8
; AVX-NEXT: # kill: def $ecx killed $ecx def $rcx
; AVX-NEXT: # kill: def $edx killed $edx def $rdx
; AVX-NEXT: # kill: def $esi killed $esi def $rsi
; AVX-NEXT: # kill: def $edi killed $edi def $rdi
; AVX-NEXT: andl $7, %edi
; AVX-NEXT: andl $7, %esi
; AVX-NEXT: andl $7, %edx
; AVX-NEXT: andl $7, %ecx
; AVX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: andl $7, %r8d
; AVX-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
; AVX-NEXT: andl $7, %r9d
; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpinsrw $0, -40(%rsp,%rdi,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $1, -24(%rsp,%rsi,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $2, -40(%rsp,%rdx,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $4, -40(%rsp,%r8,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $5, -24(%rsp,%r9,2), %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <8 x i16> %x, i16 %i0
%y1 = extractelement <8 x i16> %y, i16 %i1
%x2 = extractelement <8 x i16> %x, i16 %i2
%y3 = extractelement <8 x i16> %y, i16 %i3
%x4 = extractelement <8 x i16> %x, i16 %i4
%y5 = extractelement <8 x i16> %y, i16 %i5
%x6 = extractelement <8 x i16> %x, i16 %i6
%x7 = extractelement <8 x i16> %x, i16 %i7
%r0 = insertelement <8 x i16> undef, i16 %x0, i32 0
%r1 = insertelement <8 x i16> %r0, i16 %y1, i32 1
%r2 = insertelement <8 x i16> %r1, i16 %x2, i32 2
%r3 = insertelement <8 x i16> %r2, i16 %y3, i32 3
%r4 = insertelement <8 x i16> %r3, i16 %x4, i32 4
%r5 = insertelement <8 x i16> %r4, i16 %y5, i32 5
%r6 = insertelement <8 x i16> %r5, i16 0, i32 6
%r7 = insertelement <8 x i16> %r6, i16 0, i32 7
ret <8 x i16> %r7
}