diff --git a/llvm/lib/Target/ARM/ARMSubtarget.cpp b/llvm/lib/Target/ARM/ARMSubtarget.cpp
index d510e7ebf100..69b496f386ea 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ b/llvm/lib/Target/ARM/ARMSubtarget.cpp
@@ -219,8 +219,10 @@ void ARMSubtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
   if (isTargetMachO()) {
     IsR9Reserved = ReserveR9 | !HasV6Ops;
     SupportsTailCall = !isTargetIOS() || !getTargetTriple().isOSVersionLT(5, 0);
-  } else
+  } else {
     IsR9Reserved = ReserveR9;
+    SupportsTailCall = !isThumb1Only();
+  }
 
   if (!isThumb() || hasThumb2())
     PostRAScheduler = true;
diff --git a/llvm/test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll b/llvm/test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll
index 08bf99b31f54..6bd23b102e79 100644
--- a/llvm/test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll
+++ b/llvm/test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll
@@ -72,7 +72,7 @@ define void @foo(double %p0, ; --> D0
                  double %p8, ; --> Stack
                  i32 %p9) #0 { ; --> R0, not Stack+8
 entry:
-  tail call void @fooUseI32(i32 %p9)
+  call void @fooUseI32(i32 %p9)
   ret void
 }
 
diff --git a/llvm/test/CodeGen/ARM/2014-02-21-byval-reg-split-alignment.ll b/llvm/test/CodeGen/ARM/2014-02-21-byval-reg-split-alignment.ll
index 054a45ced1d0..33bfa2fa61cd 100644
--- a/llvm/test/CodeGen/ARM/2014-02-21-byval-reg-split-alignment.ll
+++ b/llvm/test/CodeGen/ARM/2014-02-21-byval-reg-split-alignment.ll
@@ -23,7 +23,7 @@ define void @foo1(i32 %a, %struct12bytes* byval %b, i64 %c) {
 ; CHECK: pop {r11, lr}
 ; CHECK: add sp, sp, #16
 
-  tail call void @useLong(i64 %c)
+  call void @useLong(i64 %c)
   ret void
 }
 
@@ -40,7 +40,7 @@ define void @foo2(i32 %a, %struct8bytes8align* byval %b) {
 ; CHECK: pop {r11, lr}
 ; CHECK: add sp, sp, #8
 
-  tail call void @usePtr(%struct8bytes8align* %b)
+  call void @usePtr(%struct8bytes8align* %b)
   ret void
 }
 
@@ -57,7 +57,7 @@ define void @foo3(%struct8bytes8align* byval %a, %struct4bytes* byval %b) {
 ; CHECK: pop {r11, lr}
 ; CHECK: add sp, sp, #16
 
-  tail call void @usePtr(%struct8bytes8align* %a)
+  call void @usePtr(%struct8bytes8align* %a)
   ret void
 }
 
@@ -76,7 +76,7 @@ define void @foo4(%struct4bytes* byval %a, %struct8bytes8align* byval %b) {
 ; CHECK: add sp, sp, #16
 ; CHECK: mov pc, lr
 
-  tail call void @usePtr(%struct8bytes8align* %b)
+  call void @usePtr(%struct8bytes8align* %b)
   ret void
 }
 
@@ -95,7 +95,7 @@ define void @foo5(%struct8bytes8align* byval %a, %struct4bytes* byval %b, %struc
 ; CHECK: add sp, sp, #16
 ; CHECK: mov pc, lr
 
-  tail call void @usePtr(%struct8bytes8align* %a)
+  call void @usePtr(%struct8bytes8align* %a)
   ret void
 }
 
@@ -109,6 +109,6 @@ define void @foo6(i32 %a, i32 %b, i32 %c, %struct8bytes8align* byval %d) {
 ; CHECK: pop {r11, lr}
 ; CHECK: mov pc, lr
 
-  tail call void @usePtr(%struct8bytes8align* %d)
+  call void @usePtr(%struct8bytes8align* %d)
   ret void
 }
diff --git a/llvm/test/CodeGen/ARM/debug-frame.ll b/llvm/test/CodeGen/ARM/debug-frame.ll
index e18501a387c8..73357df8e2eb 100644
--- a/llvm/test/CodeGen/ARM/debug-frame.ll
+++ b/llvm/test/CodeGen/ARM/debug-frame.ll
@@ -301,7 +301,7 @@ declare void @throw_exception_2()
 
 define void @test2() {
 entry:
-  tail call void @throw_exception_2()
+  call void @throw_exception_2()
   ret void
 }
 
diff --git a/llvm/test/CodeGen/ARM/ehabi.ll b/llvm/test/CodeGen/ARM/ehabi.ll
index 2cf3ae5e1f3e..720cc3ce8700 100644
--- a/llvm/test/CodeGen/ARM/ehabi.ll
+++ b/llvm/test/CodeGen/ARM/ehabi.ll
@@ -181,7 +181,7 @@ declare void @throw_exception_2()
 
 define void @test2() {
 entry:
-  tail call void @throw_exception_2()
+  call void @throw_exception_2()
   ret void
 }
 
diff --git a/llvm/test/CodeGen/ARM/v1-constant-fold.ll b/llvm/test/CodeGen/ARM/v1-constant-fold.ll
index eb49a81ab763..7421d25c1780 100644
--- a/llvm/test/CodeGen/ARM/v1-constant-fold.ll
+++ b/llvm/test/CodeGen/ARM/v1-constant-fold.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mattr=+v7,+vfp3,-neon | FileCheck %s
+; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mattr=+v7,+vfp3,-neon | FileCheck %s
 
 ; PR15611. Check that we don't crash when constant folding v1i32 types.
 
@@ -11,7 +11,7 @@ bb:
   %tmp3 = insertelement <4 x i32> %tmp2, i32 0, i32 3
   %tmp4 = add <4 x i32> %tmp3,
 ; CHECK: bl bar
-  tail call void @bar(<4 x i32> %tmp4)
+  call void @bar(<4 x i32> %tmp4)
   ret void
 }
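
Note on the test updates above: once SupportsTailCall is true for non-MachO, non-Thumb1 subtargets, an IR `tail call` may actually be lowered as a tail call, which changes the prologue/epilogue and return sequence these tests CHECK for; switching the tests to plain `call` keeps their existing CHECK lines meaningful. A minimal illustrative sketch (not part of the patch; @callee and @caller are hypothetical names) of the codegen difference on an AAPCS target such as armv7-none-linux-gnueabi:

; Hypothetical example: with tail calls enabled, the marked call below
; can be emitted as a direct branch to the callee, so the caller never
; returns to this frame itself.
declare void @callee(i32)

define void @caller(i32 %x) {
entry:
  tail call void @callee(i32 %x)  ; may now lower to "b callee"
  ret void                        ; instead of "bl callee" followed by a return (e.g. "bx lr")
}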