[X86][SSE] Update extractelement test in preparation for D52140

SimplifyDemandedVectorEltsForTargetNode will remove most of this test unless we get rid of the undefs - it still tests for align 1, which was the point of the test
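For context, a minimal sketch (not part of the patch, with a hypothetical function name) of why the undefs make the test fragile: LLVM may fold a load through an undef pointer to undef, at which point the extractelement and the align 1 codegen the test guards become dead:

; Sketch only: the optimizer is free to fold the load, and every use
; of it, away entirely, so the unaligned-load lowering under test
; would never be exercised.
define double @undef_load_folds() {
bb:
  %v = load <2 x double>, <2 x double>* undef, align 1
  %hi = extractelement <2 x double> %v, i32 1
  ret double %hi
}

Loading through a real pointer argument instead keeps the align 1 load live through optimization.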

Removed an out-of-date comment as well

llvm-svn: 342531
Simon Pilgrim 2018-09-19 09:50:32 +00:00
parent 467c5f9ce0
commit e2b16389e7
1 changed file with 7 additions and 7 deletions

@@ -51,30 +51,30 @@ define i32 @t2(<8 x i32>* %xp) {
; low alignment load of the vector which prevents us from reliably forming a
; narrow load.
; The expected codegen is identical for the AVX case except
; load/store instructions will have a leading 'v', so we don't
; need to special-case the checks.
-define void @t3() {
+define void @t3(<2 x double>* %a0) {
; X32-SSE2-LABEL: t3:
; X32-SSE2: # %bb.0: # %bb
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE2-NEXT: movupd (%eax), %xmm0
; X32-SSE2-NEXT: movhpd %xmm0, (%eax)
; X32-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: t3:
; X64-SSSE3: # %bb.0: # %bb
; X64-SSSE3-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; X64-SSSE3-NEXT: movlpd %xmm0, (%rax)
; X64-SSSE3-NEXT: retq
;
; X64-AVX-LABEL: t3:
; X64-AVX: # %bb.0: # %bb
; X64-AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-AVX-NEXT: vmovlpd %xmm0, (%rax)
; X64-AVX-NEXT: retq
bb:
-  %tmp13 = load <2 x double>, <2 x double>* undef, align 1
+  %tmp13 = load <2 x double>, <2 x double>* %a0, align 1
%.sroa.3.24.vec.extract = extractelement <2 x double> %tmp13, i32 1
store double %.sroa.3.24.vec.extract, double* undef, align 8
-  unreachable
+  ret void
}
; Case where a load is unary shuffled, then bitcast (to a type with the same