diff --git a/llvm/test/CodeGen/X86/widen_load-3.ll b/llvm/test/CodeGen/X86/widen_load-3.ll
index ad8d2324e5cf..316f78593acd 100644
--- a/llvm/test/CodeGen/X86/widen_load-3.ll
+++ b/llvm/test/CodeGen/X86/widen_load-3.ll
@@ -129,7 +129,7 @@ define <7 x i64> @load7_unaligned(<7 x i64>* %x) {
 
 ; PR42305 - https://bugs.llvm.org/show_bug.cgi?id=42305
 
-define void @load_split(<8 x float>* %ld, <4 x float>* %st1, <4 x float>* %st2) {
+define void @load_split(<8 x float>* %ld, <4 x float>* %st1, <4 x float>* %st2) nounwind {
 ; X86-SSE-LABEL: load_split:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -174,3 +174,71 @@ define void @load_split(<8 x float>* %ld, <4 x float>* %st1, <4 x float>* %st2)
   store <4 x float> %t128, <4 x float>* %st2, align 1
   ret void
 }
+
+define void @load_split_more(float* %src, i32* %idx, float* %dst) nounwind {
+; X86-SSE-LABEL: load_split_more:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    pushl %esi
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-SSE-NEXT:    movl (%edx), %esi
+; X86-SSE-NEXT:    movups (%ecx), %xmm0
+; X86-SSE-NEXT:    movups 16(%ecx), %xmm1
+; X86-SSE-NEXT:    movups %xmm0, (%eax,%esi,4)
+; X86-SSE-NEXT:    movl 4(%edx), %ecx
+; X86-SSE-NEXT:    movups %xmm1, (%eax,%ecx,4)
+; X86-SSE-NEXT:    popl %esi
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: load_split_more:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    pushl %esi
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-AVX-NEXT:    movl (%edx), %esi
+; X86-AVX-NEXT:    vmovups (%ecx), %xmm0
+; X86-AVX-NEXT:    vmovups 16(%ecx), %xmm1
+; X86-AVX-NEXT:    vmovups %xmm0, (%eax,%esi,4)
+; X86-AVX-NEXT:    movl 4(%edx), %ecx
+; X86-AVX-NEXT:    vmovups %xmm1, (%eax,%ecx,4)
+; X86-AVX-NEXT:    popl %esi
+; X86-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: load_split_more:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movslq (%rsi), %rax
+; X64-SSE-NEXT:    movups (%rdi), %xmm0
+; X64-SSE-NEXT:    movups 16(%rdi), %xmm1
+; X64-SSE-NEXT:    movups %xmm0, (%rdx,%rax,4)
+; X64-SSE-NEXT:    movslq 4(%rsi), %rax
+; X64-SSE-NEXT:    movups %xmm1, (%rdx,%rax,4)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: load_split_more:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    movslq (%rsi), %rax
+; X64-AVX-NEXT:    vmovups (%rdi), %xmm0
+; X64-AVX-NEXT:    vmovups 16(%rdi), %xmm1
+; X64-AVX-NEXT:    vmovups %xmm0, (%rdx,%rax,4)
+; X64-AVX-NEXT:    movslq 4(%rsi), %rax
+; X64-AVX-NEXT:    vmovups %xmm1, (%rdx,%rax,4)
+; X64-AVX-NEXT:    retq
+  %v.i = bitcast float* %src to <8 x float>*
+  %tmp = load <8 x float>, <8 x float>* %v.i, align 1
+  %tmp1 = load i32, i32* %idx, align 4
+  %idx.ext = sext i32 %tmp1 to i64
+  %add.ptr1 = getelementptr inbounds float, float* %dst, i64 %idx.ext
+  %extract = shufflevector <8 x float> %tmp, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %v.i11 = bitcast float* %add.ptr1 to <4 x float>*
+  store <4 x float> %extract, <4 x float>* %v.i11, align 1
+  %arrayidx2 = getelementptr inbounds i32, i32* %idx, i64 1
+  %tmp2 = load i32, i32* %arrayidx2, align 4
+  %idx.ext3 = sext i32 %tmp2 to i64
+  %add.ptr4 = getelementptr inbounds float, float* %dst, i64 %idx.ext3
+  %extract5 = shufflevector <8 x float> %tmp, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %v.i10 = bitcast float* %add.ptr4 to <4 x float>*
+  store <4 x float> %extract5, <4 x float>* %v.i10, align 1
+  ret void
+}