Turning off post-ra scheduling for x86. It isn't a consistent win.

llvm-svn: 98810
This commit is contained in:
Evan Cheng 2010-03-18 06:55:42 +00:00
parent 5b59a73dc1
commit bf724b9ee0
6 changed files with 27 additions and 42 deletions

View File

@@ -366,12 +366,3 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &FS,
if (StackAlignment)
stackAlignment = StackAlignment;
}
// enablePostRAScheduler - Configure post-register-allocation scheduling for
// x86 and report whether it should run at the given optimization level.
// NOTE(review): this diff hunk removes this override — the commit disables
// post-RA scheduling for x86 because it "isn't a consistent win".
//
// Mode            - out: set to ANTIDEP_CRITICAL (break anti-dependencies
//                   only along the critical path).
// CriticalPathRCs - out: cleared; no register classes are singled out for
//                   critical-path anti-dependence breaking.
// Returns true only at CodeGenOpt::Aggressive (-O3) or higher.
bool X86Subtarget::enablePostRAScheduler(
CodeGenOpt::Level OptLevel,
TargetSubtarget::AntiDepBreakMode& Mode,
RegClassVector& CriticalPathRCs) const {
Mode = TargetSubtarget::ANTIDEP_CRITICAL;
CriticalPathRCs.clear();
// Only enable when the user asked for aggressive optimization.
return OptLevel >= CodeGenOpt::Aggressive;
}

View File

@@ -230,12 +230,6 @@ public:
/// indicating the number of scheduling cycles of backscheduling that
/// should be attempted.
unsigned getSpecialAddressLatency() const;
/// enablePostRAScheduler - X86 target is enabling post-alloc scheduling
/// at 'More' optimization level.
bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
TargetSubtarget::AntiDepBreakMode& Mode,
RegClassVector& CriticalPathRCs) const;
};
} // End llvm namespace

View File

@@ -11,12 +11,12 @@ define float @foo(float %x) nounwind {
%tmp14 = fadd float %tmp12, %tmp7
ret float %tmp14
; CHECK: mulss LCPI1_3(%rip)
; CHECK-NEXT: mulss LCPI1_0(%rip)
; CHECK-NEXT: mulss LCPI1_1(%rip)
; CHECK-NEXT: mulss LCPI1_2(%rip)
; CHECK-NEXT: addss
; CHECK-NEXT: addss
; CHECK-NEXT: addss
; CHECK-NEXT: ret
; CHECK: mulss LCPI1_0(%rip)
; CHECK: mulss LCPI1_1(%rip)
; CHECK: addss
; CHECK: mulss LCPI1_2(%rip)
; CHECK: addss
; CHECK: mulss LCPI1_3(%rip)
; CHECK: addss
; CHECK: ret
}

View File

@@ -8,10 +8,10 @@ target triple = "x86_64-unknown-unknown"
; CHECK: full_me_0:
; CHECK: movsd (%rsi), %xmm0
; CHECK: addq $8, %rsi
; CHECK: mulsd (%rdx), %xmm0
; CHECK: addq $8, %rdx
; CHECK: movsd %xmm0, (%rdi)
; CHECK: addq $8, %rsi
; CHECK: addq $8, %rdx
; CHECK: addq $8, %rdi
; CHECK: decq %rcx
; CHECK: jne
@@ -53,10 +53,10 @@ return:
; CHECK: mulsd -2048(%rdx), %xmm0
; CHECK: movsd %xmm0, -2048(%rdi)
; CHECK: movsd (%rsi), %xmm0
; CHECK: addq $8, %rsi
; CHECK: divsd (%rdx), %xmm0
; CHECK: addq $8, %rdx
; CHECK: movsd %xmm0, (%rdi)
; CHECK: addq $8, %rsi
; CHECK: addq $8, %rdx
; CHECK: addq $8, %rdi
; CHECK: decq %rcx
; CHECK: jne
@@ -99,10 +99,10 @@ return:
; CHECK: mulsd (%rdx), %xmm0
; CHECK: movsd %xmm0, (%rdi)
; CHECK: movsd -2048(%rsi), %xmm0
; CHECK: addq $8, %rsi
; CHECK: divsd -2048(%rdx), %xmm0
; CHECK: addq $8, %rdx
; CHECK: movsd %xmm0, -2048(%rdi)
; CHECK: addq $8, %rsi
; CHECK: addq $8, %rdx
; CHECK: addq $8, %rdi
; CHECK: decq %rcx
; CHECK: jne
@@ -144,10 +144,10 @@ return:
; CHECK: mulsd (%rdx), %xmm0
; CHECK: movsd %xmm0, (%rdi)
; CHECK: movsd -4096(%rsi), %xmm0
; CHECK: addq $8, %rsi
; CHECK: divsd -4096(%rdx), %xmm0
; CHECK: addq $8, %rdx
; CHECK: movsd %xmm0, -4096(%rdi)
; CHECK: addq $8, %rsi
; CHECK: addq $8, %rdx
; CHECK: addq $8, %rdi
; CHECK: decq %rcx
; CHECK: jne
@@ -310,10 +310,10 @@ return:
; CHECK: addsd (%rsi), %xmm0
; CHECK: movsd %xmm0, (%rdx)
; CHECK: movsd 40(%rdi), %xmm0
; CHECK: addq $8, %rdi
; CHECK: subsd 40(%rsi), %xmm0
; CHECK: addq $8, %rsi
; CHECK: movsd %xmm0, 40(%rdx)
; CHECK: addq $8, %rdi
; CHECK: addq $8, %rsi
; CHECK: addq $8, %rdx
; CHECK: decq %rcx
; CHECK: jne

View File

@@ -10,10 +10,10 @@ define void @t1(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
; CHECK: t1:
; CHECK: movl 8(%esp), %eax
; CHECK-NEXT: movl 4(%esp), %ecx
; CHECK-NEXT: movapd (%eax), %xmm0
; CHECK-NEXT: movlpd 12(%esp), %xmm0
; CHECK-NEXT: movapd %xmm0, (%ecx)
; CHECK-NEXT: movl 4(%esp), %eax
; CHECK-NEXT: movapd %xmm0, (%eax)
; CHECK-NEXT: ret
}
@@ -26,9 +26,9 @@ define void @t2(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
; CHECK: t2:
; CHECK: movl 8(%esp), %eax
; CHECK-NEXT: movl 4(%esp), %ecx
; CHECK-NEXT: movapd (%eax), %xmm0
; CHECK-NEXT: movhpd 12(%esp), %xmm0
; CHECK-NEXT: movapd %xmm0, (%ecx)
; CHECK-NEXT: movl 4(%esp), %eax
; CHECK-NEXT: movapd %xmm0, (%eax)
; CHECK-NEXT: ret
}

View File

@@ -17,8 +17,8 @@ entry:
; X64: t0:
; X64: movddup (%rsi), %xmm0
; X64: xorl %eax, %eax
; X64: pshuflw $0, %xmm0, %xmm0
; X64: xorl %eax, %eax
; X64: pinsrw $0, %eax, %xmm0
; X64: movaps %xmm0, (%rdi)
; X64: ret
@@ -169,11 +169,11 @@ define internal void @t10() nounwind {
ret void
; X64: t10:
; X64: pextrw $4, %xmm0, %eax
; X64: pextrw $6, %xmm0, %edx
; X64: movlhps %xmm1, %xmm1
; X64: pshuflw $8, %xmm1, %xmm1
; X64: pinsrw $2, %eax, %xmm1
; X64: pinsrw $3, %edx, %xmm1
; X64: pextrw $6, %xmm0, %eax
; X64: pinsrw $3, %eax, %xmm1
}
@@ -184,8 +184,8 @@ entry:
ret <8 x i16> %tmp7
; X64: t11:
; X64: movlhps %xmm0, %xmm0
; X64: movd %xmm1, %eax
; X64: movlhps %xmm0, %xmm0
; X64: pshuflw $1, %xmm0, %xmm0
; X64: pinsrw $1, %eax, %xmm0
; X64: ret
@@ -198,8 +198,8 @@ entry:
ret <8 x i16> %tmp9
; X64: t12:
; X64: movlhps %xmm0, %xmm0
; X64: pextrw $3, %xmm1, %eax
; X64: movlhps %xmm0, %xmm0
; X64: pshufhw $3, %xmm0, %xmm0
; X64: pinsrw $5, %eax, %xmm0
; X64: ret