From 8351c3276476812b4ea8f9d1031a8edc3ee94a82 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Thu, 4 Jul 2019 12:33:37 +0000
Subject: [PATCH] [X86] Regenerate load fold peephole test.

llvm-svn: 365136
---
 llvm/test/CodeGen/X86/peephole-fold-movsd.ll | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/llvm/test/CodeGen/X86/peephole-fold-movsd.ll b/llvm/test/CodeGen/X86/peephole-fold-movsd.ll
index 818040a6f02c..fbfa0d474846 100644
--- a/llvm/test/CodeGen/X86/peephole-fold-movsd.ll
+++ b/llvm/test/CodeGen/X86/peephole-fold-movsd.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=x86_64-pc-linux < %s | FileCheck %s
 ;
 ; Check that x86's peephole optimization doesn't fold a 64-bit load (movsd) into
@@ -10,10 +11,19 @@
 
 declare void @foo3(%struct.S1*)
 
-; CHECK: movsd {{[0-9]*}}(%rsp), [[R0:%xmm[0-9]+]]
-; CHECK: addpd [[R0]], %xmm{{[0-9]+}}
-
-define void @foo1(double %a.coerce0, double %a.coerce1, double %b.coerce0, double %b.coerce1) {
+define void @foo1(double %a.coerce0, double %a.coerce1, double %b.coerce0, double %b.coerce1) nounwind {
+; CHECK-LABEL: foo1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    movq %rsp, %rdi
+; CHECK-NEXT:    callq foo3
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movapd {{.*#+}} xmm1 = <1.0E+0,u>
+; CHECK-NEXT:    movhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    addpd %xmm0, %xmm1
+; CHECK-NEXT:    movapd %xmm1, {{.*}}(%rip)
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    retq
   %1 = alloca <2 x double>, align 16
   %tmpcast = bitcast <2 x double>* %1 to %struct.S1*
   call void @foo3(%struct.S1* %tmpcast) #2