[X86] Regenerate load fold peephole test.

llvm-svn: 365136
This commit is contained in:
Simon Pilgrim 2019-07-04 12:33:37 +00:00
parent 1ca2ee4dc1
commit 8351c32764
1 changed file with 14 additions and 4 deletions

View File

@@ -1,3 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-pc-linux < %s | FileCheck %s
;
; Check that x86's peephole optimization doesn't fold a 64-bit load (movsd) into
@@ -10,10 +11,19 @@
declare void @foo3(%struct.S1*)
; CHECK: movsd {{[0-9]*}}(%rsp), [[R0:%xmm[0-9]+]]
; CHECK: addpd [[R0]], %xmm{{[0-9]+}}
define void @foo1(double %a.coerce0, double %a.coerce1, double %b.coerce0, double %b.coerce1) {
define void @foo1(double %a.coerce0, double %a.coerce1, double %b.coerce0, double %b.coerce1) nounwind {
; CHECK-LABEL: foo1:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: movq %rsp, %rdi
; CHECK-NEXT: callq foo3
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movapd {{.*#+}} xmm1 = <1.0E+0,u>
; CHECK-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: addpd %xmm0, %xmm1
; CHECK-NEXT: movapd %xmm1, {{.*}}(%rip)
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: retq
%1 = alloca <2 x double>, align 16
%tmpcast = bitcast <2 x double>* %1 to %struct.S1*
call void @foo3(%struct.S1* %tmpcast) #2