ARM: enable tail call optimisation on Thumb 2

Tail call optimisation was previously disabled on all targets other than
iOS 5.0+.  This enables tail call optimisation on all Thumb-2 capable
platforms.

The test adjustments remove the IR hint "tail" from function invocations.
The tests were designed assuming that tail call optimisation would not kick
in, which no longer holds true.

llvm-svn: 203575
This commit is contained in:
Saleem Abdulrasool 2014-03-11 15:09:44 +00:00
parent 3f5dcc97e0
commit ec1ec1b416
6 changed files with 14 additions and 12 deletions

View File

@ -219,8 +219,10 @@ void ARMSubtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
if (isTargetMachO()) {
IsR9Reserved = ReserveR9 | !HasV6Ops;
SupportsTailCall = !isTargetIOS() || !getTargetTriple().isOSVersionLT(5, 0);
} else
} else {
IsR9Reserved = ReserveR9;
SupportsTailCall = !isThumb1Only();
}
if (!isThumb() || hasThumb2())
PostRAScheduler = true;

View File

@ -72,7 +72,7 @@ define void @foo(double %p0, ; --> D0
double %p8, ; --> Stack
i32 %p9) #0 { ; --> R0, not Stack+8
entry:
tail call void @fooUseI32(i32 %p9)
call void @fooUseI32(i32 %p9)
ret void
}

View File

@ -23,7 +23,7 @@ define void @foo1(i32 %a, %struct12bytes* byval %b, i64 %c) {
; CHECK: pop {r11, lr}
; CHECK: add sp, sp, #16
tail call void @useLong(i64 %c)
call void @useLong(i64 %c)
ret void
}
@ -40,7 +40,7 @@ define void @foo2(i32 %a, %struct8bytes8align* byval %b) {
; CHECK: pop {r11, lr}
; CHECK: add sp, sp, #8
tail call void @usePtr(%struct8bytes8align* %b)
call void @usePtr(%struct8bytes8align* %b)
ret void
}
@ -57,7 +57,7 @@ define void @foo3(%struct8bytes8align* byval %a, %struct4bytes* byval %b) {
; CHECK: pop {r11, lr}
; CHECK: add sp, sp, #16
tail call void @usePtr(%struct8bytes8align* %a)
call void @usePtr(%struct8bytes8align* %a)
ret void
}
@ -76,7 +76,7 @@ define void @foo4(%struct4bytes* byval %a, %struct8bytes8align* byval %b) {
; CHECK: add sp, sp, #16
; CHECK: mov pc, lr
tail call void @usePtr(%struct8bytes8align* %b)
call void @usePtr(%struct8bytes8align* %b)
ret void
}
@ -95,7 +95,7 @@ define void @foo5(%struct8bytes8align* byval %a, %struct4bytes* byval %b, %struc
; CHECK: add sp, sp, #16
; CHECK: mov pc, lr
tail call void @usePtr(%struct8bytes8align* %a)
call void @usePtr(%struct8bytes8align* %a)
ret void
}
@ -109,6 +109,6 @@ define void @foo6(i32 %a, i32 %b, i32 %c, %struct8bytes8align* byval %d) {
; CHECK: pop {r11, lr}
; CHECK: mov pc, lr
tail call void @usePtr(%struct8bytes8align* %d)
call void @usePtr(%struct8bytes8align* %d)
ret void
}

View File

@ -301,7 +301,7 @@ declare void @throw_exception_2()
define void @test2() {
entry:
tail call void @throw_exception_2()
call void @throw_exception_2()
ret void
}

View File

@ -181,7 +181,7 @@ declare void @throw_exception_2()
define void @test2() {
entry:
tail call void @throw_exception_2()
call void @throw_exception_2()
ret void
}

View File

@ -1,4 +1,4 @@
; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mattr=+v7,+vfp3,-neon | FileCheck %s
; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mattr=+v7,+vfp3,-neon | FileCheck %s
; PR15611. Check that we don't crash when constant folding v1i32 types.
@ -11,7 +11,7 @@ bb:
%tmp3 = insertelement <4 x i32> %tmp2, i32 0, i32 3
%tmp4 = add <4 x i32> %tmp3, <i32 -1, i32 -1, i32 -1, i32 -1>
; CHECK: bl bar
tail call void @bar(<4 x i32> %tmp4)
call void @bar(<4 x i32> %tmp4)
ret void
}