2015-11-18 08:40:54 +08:00
|
|
|
; Disable shrink-wrapping on the first test otherwise we wouldn't
|
|
|
|
; exercise the path for PR18136.
|
2019-04-02 07:55:57 +08:00
|
|
|
; RUN: llc -mtriple=thumbv7-apple-none-macho < %s -enable-shrink-wrap=false -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK-FNSTART,CHECK
|
|
|
|
; RUN: llc -mtriple=thumbv6m-apple-none-macho -frame-pointer=all < %s -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK-FNSTART,CHECK-T1
|
|
|
|
; RUN: llc -mtriple=thumbv6m-apple-none-macho < %s -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK-FNSTART,CHECK-T1-NOFP
|
|
|
|
; RUN: llc -mtriple=thumbv7-apple-darwin-ios -frame-pointer=all < %s -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK-FNSTART,CHECK-IOS
|
|
|
|
; RUN: llc -mtriple=thumbv7--linux-gnueabi -frame-pointer=all < %s -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK-FNSTART,CHECK-LINUX
|
2013-11-09 01:18:07 +08:00
|
|
|
|
|
|
|
declare void @bar(i8*)
|
|
|
|
|
|
|
|
%bigVec = type [2 x double]
|
|
|
|
|
|
|
|
@var = global %bigVec zeroinitializer
|
|
|
|
|
|
|
|
define void @check_simple() minsize {
|
2019-04-02 07:55:57 +08:00
|
|
|
; CHECK-FNSTART-LABEL: check_simple:
|
2015-04-24 04:31:26 +08:00
|
|
|
; CHECK: push {r3, r4, r5, r6, r7, lr}
|
2013-11-09 01:18:07 +08:00
|
|
|
; CHECK-NOT: sub sp, sp,
|
|
|
|
; ...
|
|
|
|
; CHECK-NOT: add sp, sp,
|
2015-04-24 04:31:26 +08:00
|
|
|
; CHECK: pop {r0, r1, r2, r3, r7, pc}
|
2013-11-09 01:18:07 +08:00
|
|
|
|
|
|
|
; CHECK-T1: push {r3, r4, r5, r6, r7, lr}
|
|
|
|
; CHECK-T1: add r7, sp, #16
|
|
|
|
; CHECK-T1-NOT: sub sp, sp,
|
|
|
|
; ...
|
|
|
|
; CHECK-T1-NOT: add sp, sp,
|
2013-12-01 22:16:24 +08:00
|
|
|
; CHECK-T1: pop {r0, r1, r2, r3, r7, pc}
|
2013-11-09 01:18:07 +08:00
|
|
|
|
|
|
|
; iOS always has a frame pointer and messing with the push affects
|
|
|
|
; how it's set in the prologue. Make sure we get that right.
|
|
|
|
; CHECK-IOS: push {r3, r4, r5, r6, r7, lr}
|
|
|
|
; CHECK-NOT: sub sp,
|
|
|
|
; CHECK-IOS: add r7, sp, #16
|
|
|
|
; CHECK-NOT: sub sp,
|
|
|
|
; ...
|
|
|
|
; CHECK-NOT: add sp,
|
|
|
|
; CHECK-IOS: pop {r3, r4, r5, r6, r7, pc}
|
|
|
|
|
|
|
|
%var = alloca i8, i32 16
|
|
|
|
call void @bar(i8* %var)
|
|
|
|
ret void
|
|
|
|
}
|
|
|
|
|
2019-09-03 17:51:19 +08:00
|
|
|
define i32 @check_simple_ret() minsize {
|
|
|
|
; CHECK-FNSTART-LABEL: check_simple_ret:
|
|
|
|
; CHECK: push {r5, r6, r7, lr}
|
|
|
|
; CHECK-NOT: sub sp,
|
|
|
|
; ...
|
|
|
|
; CHECK-NOT: add sp,
|
|
|
|
; CHECK: pop {r2, r3, r7, pc}
|
|
|
|
|
|
|
|
%var = alloca i8, i32 8
|
|
|
|
call void @bar(i8* %var)
|
|
|
|
ret i32 0
|
|
|
|
}
|
|
|
|
|
2013-11-09 01:18:07 +08:00
|
|
|
define void @check_simple_too_big() minsize {
|
2019-04-02 07:55:57 +08:00
|
|
|
; CHECK-FNSTART-LABEL: check_simple_too_big:
|
2015-04-24 04:31:26 +08:00
|
|
|
; CHECK: push {r7, lr}
|
2013-11-09 01:18:07 +08:00
|
|
|
; CHECK: sub sp,
|
|
|
|
; ...
|
|
|
|
; CHECK: add sp,
|
2015-04-24 04:31:26 +08:00
|
|
|
; CHECK: pop {r7, pc}
|
2013-11-09 01:18:07 +08:00
|
|
|
%var = alloca i8, i32 64
|
|
|
|
call void @bar(i8* %var)
|
|
|
|
ret void
|
|
|
|
}
|
|
|
|
|
|
|
|
define void @check_vfp_fold() minsize {
|
2019-04-02 07:55:57 +08:00
|
|
|
; CHECK-FNSTART-LABEL: check_vfp_fold:
|
2013-11-09 01:18:07 +08:00
|
|
|
; CHECK: push {r[[GLOBREG:[0-9]+]], lr}
|
|
|
|
; CHECK: vpush {d6, d7, d8, d9}
|
|
|
|
; CHECK-NOT: sub sp,
|
|
|
|
; ...
|
|
|
|
; CHECK-NOT: add sp,
|
|
|
|
; CHECK: vpop {d6, d7, d8, d9}
|
2015-08-11 03:01:27 +08:00
|
|
|
; CHECK: pop {r[[GLOBREG]], pc}
|
2013-11-09 01:18:07 +08:00
|
|
|
|
|
|
|
; iOS uses aligned NEON stores here, which is convenient since we
|
|
|
|
; want to make sure that works too.
|
2015-09-24 06:21:09 +08:00
|
|
|
; CHECK-IOS: push {r4, r7, lr}
|
2013-11-09 01:18:07 +08:00
|
|
|
; CHECK-IOS: sub.w r4, sp, #16
|
2015-01-08 23:09:14 +08:00
|
|
|
; CHECK-IOS: bfc r4, #0, #4
|
2013-11-09 01:18:07 +08:00
|
|
|
; CHECK-IOS: mov sp, r4
|
|
|
|
; CHECK-IOS: vst1.64 {d8, d9}, [r4:128]
|
2015-09-24 06:21:09 +08:00
|
|
|
; CHECK-IOS: sub sp, #16
|
2013-11-09 01:18:07 +08:00
|
|
|
; ...
|
|
|
|
; CHECK-IOS: add r4, sp, #16
|
|
|
|
; CHECK-IOS: vld1.64 {d8, d9}, [r4:128]
|
|
|
|
; CHECK-IOS: mov sp, r4
|
|
|
|
; CHECK-IOS: pop {r4, r7, pc}
|
|
|
|
|
|
|
|
%var = alloca i8, i32 16
|
|
|
|
|
2015-08-04 01:20:10 +08:00
|
|
|
call void asm "", "r,~{d8},~{d9}"(i8* %var)
|
2013-11-09 01:18:07 +08:00
|
|
|
call void @bar(i8* %var)
|
|
|
|
|
|
|
|
ret void
|
|
|
|
}
|
|
|
|
|
|
|
|
; This function should use just enough space that the "add sp, sp, ..." could be
|
|
|
|
; folded in except that doing so would clobber the value being returned.
|
|
|
|
define i64 @check_no_return_clobber() minsize {
|
2019-04-02 07:55:57 +08:00
|
|
|
; CHECK-FNSTART-LABEL: check_no_return_clobber:
|
2015-04-24 04:31:26 +08:00
|
|
|
; CHECK: push {r1, r2, r3, r4, r5, r6, r7, lr}
|
2013-11-09 01:18:07 +08:00
|
|
|
; CHECK-NOT: sub sp,
|
|
|
|
; ...
|
2014-01-06 22:28:05 +08:00
|
|
|
; CHECK: add sp, #24
|
2015-04-24 04:31:26 +08:00
|
|
|
; CHECK: pop {r7, pc}
|
2013-11-09 01:18:07 +08:00
|
|
|
|
2014-01-06 22:28:05 +08:00
|
|
|
%var = alloca i8, i32 20
|
2013-11-09 01:18:07 +08:00
|
|
|
call void @bar(i8* %var)
|
|
|
|
ret i64 0
|
|
|
|
}
|
|
|
|
|
|
|
|
define arm_aapcs_vfpcc double @check_vfp_no_return_clobber() minsize {
|
2019-04-02 07:55:57 +08:00
|
|
|
; CHECK-FNSTART-LABEL: check_vfp_no_return_clobber:
|
2013-11-09 01:18:07 +08:00
|
|
|
; CHECK: push {r[[GLOBREG:[0-9]+]], lr}
|
|
|
|
; CHECK: vpush {d0, d1, d2, d3, d4, d5, d6, d7, d8, d9}
|
|
|
|
; CHECK-NOT: sub sp,
|
|
|
|
; ...
|
|
|
|
; CHECK: add sp, #64
|
|
|
|
; CHECK: vpop {d8, d9}
|
|
|
|
; CHECK: pop {r[[GLOBREG]], pc}
|
|
|
|
|
|
|
|
%var = alloca i8, i32 64
|
|
|
|
|
2015-02-28 05:17:42 +08:00
|
|
|
%tmp = load %bigVec, %bigVec* @var
|
2013-11-09 01:18:07 +08:00
|
|
|
call void @bar(i8* %var)
|
|
|
|
store %bigVec %tmp, %bigVec* @var
|
|
|
|
|
|
|
|
ret double 1.0
|
|
|
|
}
|
2013-12-05 19:02:02 +08:00
|
|
|
|
|
|
|
@dbl = global double 0.0
|
|
|
|
|
|
|
|
; PR18136: there was a bug determining where the first eligible pop in a
|
|
|
|
; basic-block was when the entire block was epilogue code.
|
|
|
|
define void @test_fold_point(i1 %tst) minsize {
|
2019-04-02 07:55:57 +08:00
|
|
|
; CHECK-FNSTART-LABEL: test_fold_point:
|
2013-12-05 19:02:02 +08:00
|
|
|
|
|
|
|
; Important to check for beginning of basic block, because if it gets
|
|
|
|
; if-converted the test is probably no longer checking what it should.
|
Codegen: Make chains from trellis-shaped CFGs
Lay out trellis-shaped CFGs optimally.
A trellis of the shape below:
A B
|\ /|
| \ / |
| X |
| / \ |
|/ \|
C D
would be laid out A; B->C ; D by the current layout algorithm. Now we identify
trellises and lay them out either A->C; B->D or A->D; B->C. This scales with an
increasing number of predecessors. A trellis is a a group of 2 or more
predecessor blocks that all have the same successors.
because of this we can tail duplicate to extend existing trellises.
As an example consider the following CFG:
B D F H
/ \ / \ / \ / \
A---C---E---G---Ret
Where A,C,E,G are all small (Currently 2 instructions).
The CFG preserving layout is then A,B,C,D,E,F,G,H,Ret.
The current code will copy C into B, E into D and G into F and yield the layout
A,C,B(C),E,D(E),F(G),G,H,ret
define void @straight_test(i32 %tag) {
entry:
br label %test1
test1: ; A
%tagbit1 = and i32 %tag, 1
%tagbit1eq0 = icmp eq i32 %tagbit1, 0
br i1 %tagbit1eq0, label %test2, label %optional1
optional1: ; B
call void @a()
br label %test2
test2: ; C
%tagbit2 = and i32 %tag, 2
%tagbit2eq0 = icmp eq i32 %tagbit2, 0
br i1 %tagbit2eq0, label %test3, label %optional2
optional2: ; D
call void @b()
br label %test3
test3: ; E
%tagbit3 = and i32 %tag, 4
%tagbit3eq0 = icmp eq i32 %tagbit3, 0
br i1 %tagbit3eq0, label %test4, label %optional3
optional3: ; F
call void @c()
br label %test4
test4: ; G
%tagbit4 = and i32 %tag, 8
%tagbit4eq0 = icmp eq i32 %tagbit4, 0
br i1 %tagbit4eq0, label %exit, label %optional4
optional4: ; H
call void @d()
br label %exit
exit:
ret void
}
here is the layout after D27742:
straight_test: # @straight_test
; ... Prologue elided
; BB#0: # %entry ; A (merged with test1)
; ... More prologue elided
mr 30, 3
andi. 3, 30, 1
bc 12, 1, .LBB0_2
; BB#1: # %test2 ; C
rlwinm. 3, 30, 0, 30, 30
beq 0, .LBB0_3
b .LBB0_4
.LBB0_2: # %optional1 ; B (copy of C)
bl a
nop
rlwinm. 3, 30, 0, 30, 30
bne 0, .LBB0_4
.LBB0_3: # %test3 ; E
rlwinm. 3, 30, 0, 29, 29
beq 0, .LBB0_5
b .LBB0_6
.LBB0_4: # %optional2 ; D (copy of E)
bl b
nop
rlwinm. 3, 30, 0, 29, 29
bne 0, .LBB0_6
.LBB0_5: # %test4 ; G
rlwinm. 3, 30, 0, 28, 28
beq 0, .LBB0_8
b .LBB0_7
.LBB0_6: # %optional3 ; F (copy of G)
bl c
nop
rlwinm. 3, 30, 0, 28, 28
beq 0, .LBB0_8
.LBB0_7: # %optional4 ; H
bl d
nop
.LBB0_8: # %exit ; Ret
ld 30, 96(1) # 8-byte Folded Reload
addi 1, 1, 112
ld 0, 16(1)
mtlr 0
blr
The tail-duplication has produced some benefit, but it has also produced a
trellis which is not laid out optimally. With this patch, we improve the layouts
of such trellises, and decrease the cost calculation for tail-duplication
accordingly.
This patch produces the layout A,C,E,G,B,D,F,H,Ret. This layout does have
back edges, which is a negative, but it has a bigger compensating
positive, which is that it handles the case where there are long strings
of skipped blocks much better than the original layout. Both layouts
handle runs of executed blocks equally well. Branch prediction also
improves if there is any correlation between subsequent optional blocks.
Here is the resulting concrete layout:
straight_test: # @straight_test
; BB#0: # %entry ; A (merged with test1)
mr 30, 3
andi. 3, 30, 1
bc 12, 1, .LBB0_4
; BB#1: # %test2 ; C
rlwinm. 3, 30, 0, 30, 30
bne 0, .LBB0_5
.LBB0_2: # %test3 ; E
rlwinm. 3, 30, 0, 29, 29
bne 0, .LBB0_6
.LBB0_3: # %test4 ; G
rlwinm. 3, 30, 0, 28, 28
bne 0, .LBB0_7
b .LBB0_8
.LBB0_4: # %optional1 ; B (Copy of C)
bl a
nop
rlwinm. 3, 30, 0, 30, 30
beq 0, .LBB0_2
.LBB0_5: # %optional2 ; D (Copy of E)
bl b
nop
rlwinm. 3, 30, 0, 29, 29
beq 0, .LBB0_3
.LBB0_6: # %optional3 ; F (Copy of G)
bl c
nop
rlwinm. 3, 30, 0, 28, 28
beq 0, .LBB0_8
.LBB0_7: # %optional4 ; H
bl d
nop
.LBB0_8: # %exit
Differential Revision: https://reviews.llvm.org/D28522
llvm-svn: 295223
2017-02-16 03:49:14 +08:00
|
|
|
; CHECK: %end
|
2013-12-05 19:02:02 +08:00
|
|
|
; CHECK-NEXT: vpop {d7, d8}
|
|
|
|
; CHECK-NEXT: pop {r4, pc}
|
ARM: fix folding of stack-adjustment (yet again).
When trying to eliminate an "sub sp, sp, #N" instruction by folding
it into an existing push/pop using dummy registers, we need to account
for the fact that this might affect precisely how "fp" gets set in the
prologue.
We were attempting this, but assuming that *whenever* we performed a
fold it would make a difference. This is false, for example, in:
push {r4, r7, lr}
add fp, sp, #4
vpush {d8}
sub sp, sp, #8
we can fold the "sub" into the "vpush", forming "vpush {d7, d8}".
However, in that case the "add fp" instruction mustn't change, which
we were getting wrong before.
Should fix PR18160.
llvm-svn: 196725
2013-12-08 23:56:50 +08:00
|
|
|
|
|
|
|
; With a guaranteed frame-pointer, we want to make sure that its offset in the
|
|
|
|
; push block is correct, even if a few registers have been tacked onto a later
|
|
|
|
; vpush (PR18160).
|
|
|
|
; CHECK-IOS: push {r4, r7, lr}
|
|
|
|
; CHECK-IOS-NEXT: add r7, sp, #4
|
|
|
|
; CHECK-IOS-NEXT: vpush {d7, d8}
|
|
|
|
|
2013-12-05 19:02:02 +08:00
|
|
|
; We want some memory so there's a stack adjustment to fold...
|
|
|
|
%var = alloca i8, i32 8
|
|
|
|
|
|
|
|
; We want a long-lived floating register so that a callee-saved dN is used and
|
|
|
|
; there's both a vpop and a pop.
|
2015-02-28 05:17:42 +08:00
|
|
|
%live_val = load double, double* @dbl
|
2013-12-05 19:02:02 +08:00
|
|
|
br i1 %tst, label %true, label %end
|
|
|
|
true:
|
|
|
|
call void @bar(i8* %var)
|
|
|
|
store double %live_val, double* @dbl
|
|
|
|
br label %end
|
|
|
|
end:
|
|
|
|
; We want the epilogue to be the only thing in a basic block so that we hit
|
|
|
|
; the correct edge-case (first inst in block is correct one to adjust).
|
|
|
|
ret void
|
2014-01-06 22:28:05 +08:00
|
|
|
}
|
2014-01-15 06:53:28 +08:00
|
|
|
|
|
|
|
define void @test_varsize(...) minsize {
|
2019-04-02 07:55:57 +08:00
|
|
|
; CHECK-FNSTART-LABEL: test_varsize:
|
2014-01-15 06:53:28 +08:00
|
|
|
; CHECK-T1: sub sp, #16
|
2014-07-23 15:08:53 +08:00
|
|
|
; CHECK-T1: push {r5, r6, r7, lr}
|
2014-01-15 06:53:28 +08:00
|
|
|
; ...
|
2014-07-23 15:08:53 +08:00
|
|
|
; CHECK-T1: pop {r2, r3, r7}
|
2015-07-21 05:42:14 +08:00
|
|
|
; CHECK-T1: pop {[[POP_REG:r[0-3]]]}
|
2014-01-15 06:53:28 +08:00
|
|
|
; CHECK-T1: add sp, #16
|
2015-07-21 05:42:14 +08:00
|
|
|
; CHECK-T1: bx [[POP_REG]]
|
2014-01-15 06:53:28 +08:00
|
|
|
|
|
|
|
; CHECK: sub sp, #16
|
2015-04-24 04:31:26 +08:00
|
|
|
; CHECK: push {r5, r6, r7, lr}
|
2014-01-15 06:53:28 +08:00
|
|
|
; ...
|
2015-04-24 04:31:26 +08:00
|
|
|
; CHECK: pop.w {r2, r3, r7, lr}
|
2014-01-15 06:53:28 +08:00
|
|
|
; CHECK: add sp, #16
|
|
|
|
; CHECK: bx lr
|
|
|
|
|
|
|
|
%var = alloca i8, i32 8
|
2014-08-23 05:59:26 +08:00
|
|
|
call void @llvm.va_start(i8* %var)
|
2014-01-15 06:53:28 +08:00
|
|
|
call void @bar(i8* %var)
|
|
|
|
ret void
|
|
|
|
}
|
2014-03-21 07:28:16 +08:00
|
|
|
|
|
|
|
%"MyClass" = type { i8*, i32, i32, float, float, float, [2 x i8], i32, i32* }
|
|
|
|
|
|
|
|
declare float @foo()
|
|
|
|
|
|
|
|
declare void @bar3()
|
|
|
|
|
|
|
|
declare %"MyClass"* @bar2(%"MyClass"* returned, i16*, i32, float, float, i32, i32, i1 zeroext, i1 zeroext, i32)
|
|
|
|
|
|
|
|
define fastcc float @check_vfp_no_return_clobber2(i16* %r, i16* %chars, i32 %length, i1 zeroext %flag) minsize {
|
|
|
|
entry:
|
2019-04-02 07:55:57 +08:00
|
|
|
; CHECK-FNSTART-LABEL: check_vfp_no_return_clobber2
|
2014-03-21 07:28:16 +08:00
|
|
|
; CHECK-LINUX: vpush {d0, d1, d2, d3, d4, d5, d6, d7, d8}
|
|
|
|
; CHECK-NOT: sub sp,
|
|
|
|
; ...
|
|
|
|
; CHECK-LINUX: add sp
|
|
|
|
; CHECK-LINUX: vpop {d8}
|
|
|
|
%run = alloca %"MyClass", align 4
|
|
|
|
%call = call %"MyClass"* @bar2(%"MyClass"* %run, i16* %chars, i32 %length, float 0.000000e+00, float 0.000000e+00, i32 1, i32 1, i1 zeroext false, i1 zeroext true, i32 3)
|
|
|
|
%call1 = call float @foo()
|
|
|
|
%cmp = icmp eq %"MyClass"* %run, null
|
|
|
|
br i1 %cmp, label %exit, label %if.then
|
|
|
|
|
|
|
|
if.then: ; preds = %entry
|
|
|
|
call void @bar3()
|
|
|
|
br label %exit
|
|
|
|
|
|
|
|
exit: ; preds = %if.then, %entry
|
|
|
|
ret float %call1
|
|
|
|
}
|
2014-08-23 05:59:26 +08:00
|
|
|
|
ARM: don't rely on push/pop reglists being in order when folding SP adjust.
It would be a very nice invariant to rely on, but unfortunately it doesn't
necessarily hold (and the causes of mis-sorted reglists appear to be quite
varied) so to be robust the frame lowering code can't assume that the first
register in the list is also the first one that actually gets pushed.
Should fix an issue where we were turning something like:
push {r8, r4, r7, lr}
sub sp, #24
into nonsense like:
push {r2, r3, r4, r5, r6, r7, r8, r4, r7, lr}
llvm-svn: 285232
2016-10-27 04:01:00 +08:00
|
|
|
declare void @use_arr(i32*)
|
|
|
|
define void @test_fold_reuse() minsize {
|
2019-04-02 07:55:57 +08:00
|
|
|
; CHECK-FNSTART-LABEL: test_fold_reuse:
|
ARM: don't rely on push/pop reglists being in order when folding SP adjust.
It would be a very nice invariant to rely on, but unfortunately it doesn't
necessarily hold (and the causes of mis-sorted reglists appear to be quite
varied) so to be robust the frame lowering code can't assume that the first
register in the list is also the first one that actually gets pushed.
Should fix an issue where we were turning something like:
push {r8, r4, r7, lr}
sub sp, #24
into nonsense like:
push {r2, r3, r4, r5, r6, r7, r8, r4, r7, lr}
llvm-svn: 285232
2016-10-27 04:01:00 +08:00
|
|
|
; CHECK: push.w {r4, r7, r8, lr}
|
|
|
|
; CHECK: sub sp, #24
|
|
|
|
; [...]
|
|
|
|
; CHECK: add sp, #24
|
|
|
|
; CHECK: pop.w {r4, r7, r8, pc}
|
|
|
|
%arr = alloca i8, i32 24
|
|
|
|
call void asm sideeffect "", "~{r8},~{r4}"()
|
|
|
|
call void @bar(i8* %arr)
|
|
|
|
ret void
|
|
|
|
}
|
|
|
|
|
2019-04-02 07:55:57 +08:00
|
|
|
; It doesn't matter what registers this pushes and pops; just make sure
|
|
|
|
; it doesn't try to push/pop an illegal register on Thumb1.
|
|
|
|
define void @test_long_fn() minsize nounwind optsize {
|
|
|
|
; CHECK-FNSTART-LABEL: test_long_fn:
|
|
|
|
; CHECK-T1-NOFP: push {r7, lr}
|
|
|
|
; CHECK-T1-NOFP: pop {r3, pc}
|
|
|
|
entry:
|
|
|
|
%z = alloca i32, align 4
|
|
|
|
call void asm sideeffect ".space 3000", "r"(i32* nonnull %z)
|
|
|
|
ret void
|
|
|
|
}
|
|
|
|
|
2014-08-23 05:59:26 +08:00
|
|
|
declare void @llvm.va_start(i8*) nounwind
|