; llvm-project/llvm/test/CodeGen/PowerPC/fma-mutate.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=+vsx -disable-ppc-vsx-fma-mutation=false | FileCheck %s

declare double @llvm.sqrt.f64(double)

; Test several VSX FMA mutation opportunities.  This is a reasonable
; transformation because it eliminates an extra register copy.
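;
; As a rough sketch (the register numbers below are illustrative, not taken
; from the checks in this file): the A-form FMA overwrites its addend, so
; keeping the addend in its own register means first copying it into the
; result register, e.g.
;   fmr       2, 3       # copy the addend c into the result register
;   xsmaddadp 2, 0, 1    # 2 = 0 * 1 + 2, i.e. a*b + c
; The M-form overwrites a multiplicand instead and reads the addend from its
; original register, so when that multiplicand is otherwise dead the copy
; disappears:
;   xsmaddmdp 1, 0, 3    # 1 = 0 * 1 + 3, i.e. a*b + c
;
; In @foo3_fmf the reassoc/afn/ninf flags let the sqrt be expanded into a
; reciprocal-square-root estimate (xsrsqrtedp) refined with multiplies and
; FMAs, which gives the mutation pass an FMA sequence to rewrite.
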
define double @foo3_fmf(double %a) nounwind {
; CHECK-LABEL: foo3_fmf:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xsabsdp 0, 1
; CHECK-NEXT:    addis 3, 2, .LCPI0_2@toc@ha
; CHECK-NEXT:    lfd 2, .LCPI0_2@toc@l(3)
; CHECK-NEXT:    xscmpudp 0, 0, 2
; CHECK-NEXT:    xxlxor 0, 0, 0
; CHECK-NEXT:    blt 0, .LBB0_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    xsrsqrtedp 0, 1
; CHECK-NEXT:    addis 3, 2, .LCPI0_0@toc@ha
; CHECK-NEXT:    lfs 3, .LCPI0_0@toc@l(3)
; CHECK-NEXT:    addis 3, 2, .LCPI0_1@toc@ha
; CHECK-NEXT:    lfs 4, .LCPI0_1@toc@l(3)
; CHECK-NEXT:    xsmuldp 2, 1, 0
; CHECK-NEXT:    xsmaddmdp 2, 0, 3
; CHECK-NEXT:    xsmuldp 0, 0, 4
; CHECK-NEXT:    xsmuldp 0, 0, 2
; CHECK-NEXT:    xsmuldp 1, 1, 0
; CHECK-NEXT:    xsmaddadp 3, 1, 0
; CHECK-NEXT:    xsmuldp 0, 1, 4
; CHECK-NEXT:    xsmuldp 0, 0, 3
; CHECK-NEXT:  .LBB0_2:
; CHECK-NEXT:    fmr 1, 0
; CHECK-NEXT:    blr
  %r = call reassoc afn ninf double @llvm.sqrt.f64(double %a)
  ret double %r
}
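
; With no fast-math flags on the call, the sqrt is not expanded: it lowers to
; a single xssqrtdp, so there is no FMA sequence for the mutation pass to
; rewrite.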
define double @foo3_safe(double %a) nounwind {
; CHECK-LABEL: foo3_safe:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xssqrtdp 1, 1
; CHECK-NEXT:    blr
  %r = call double @llvm.sqrt.f64(double %a)
  ret double %r
}