; RUN: llc -mtriple=x86_64-pc-linux < %s | FileCheck %s
;
; Check that x86's peephole optimization doesn't fold a 64-bit load (movsd) into
; addpd.
; rdar://problem/18236850
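;
; Background, condensed from the fixing commit (llvm-svn: 217799): the peephole
; pass used to rewrite the zero-extending load/add pair
;   %vreg21<def> = MOVSDrm <fi#0>, 1, %noreg, 0, %noreg; mem:LD8[%7](align=16)
;   %vreg23<def,tied1> = ADDPDrr %vreg20<tied0>, %vreg21
; into the folded form
;   %vreg23<def,tied1> = ADDPDrm %vreg20<tied0>, <fi#0>, 1, %noreg, 0, %noreg
; MOVSDrm zeroes the upper 8 bytes of its destination, while the folded ADDPDrm
; reads all 16 bytes of the stack slot, so the upper lane of the result is
; corrupted. The fix makes X86InstrInfo::foldMemoryOperandImpl reject
; zero-extending loads from stack slots as fold candidates.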
%struct.S1 = type { double, double }
@g = common global %struct.S1 zeroinitializer, align 8
declare void @foo3(%struct.S1*)
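; With the fold rejected, the 64-bit value must be reloaded from its stack slot
; by a plain movsd into its own register, and the addpd must then use that
; register rather than a folded memory operand.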
; CHECK: movsd {{[0-9]*}}(%rsp), [[R0:%xmm[0-9]+]]
; CHECK: addpd [[R0]], %xmm{{[0-9]+}}
define void @foo1(double %a.coerce0, double %a.coerce1, double %b.coerce0, double %b.coerce1) {
%1 = alloca <2 x double>, align 16
%tmpcast = bitcast <2 x double>* %1 to %struct.S1*
call void @foo3(%struct.S1* %tmpcast) #2
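; The call escapes the alloca, so the two doubles are reloaded from the
; 16-byte stack slot by the loads below.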
%p2 = getelementptr inbounds %struct.S1* %tmpcast, i64 0, i32 0
%2 = load double* %p2, align 16
%p3 = getelementptr inbounds %struct.S1* %tmpcast, i64 0, i32 1
%3 = load double* %p3, align 8
%4 = insertelement <2 x double> undef, double %2, i32 0
%5 = insertelement <2 x double> %4, double 0.000000e+00, i32 1
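; %5 = <%2, 0.0>, which is exactly what a zero-extending movsd load of %2
; produces; this is the load the peephole must not fold into the addpd.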
%6 = insertelement <2 x double> undef, double %3, i32 1
%7 = insertelement <2 x double> %6, double 1.000000e+00, i32 0
%8 = fadd <2 x double> %5, %7
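; This fadd becomes the addpd that the CHECK lines above inspect.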
store <2 x double> %8, <2 x double>* bitcast (%struct.S1* @g to <2 x double>*), align 16
ret void
}