; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 -verify-machineinstrs | FileCheck -check-prefix=CHECK-NOAV %s

target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"

; jmp_buf layout consumed by the SJLJ EH intrinsics below (matches glibc's
; __jmp_buf_tag: register save area, mask-saved flag, signal mask, padding).
%struct.__jmp_buf_tag = type { [64 x i64], i32, %struct.__sigset_t, [8 x i8] }
%struct.__sigset_t = type { [16 x i64] }

; Shared SJLJ environment buffer used by every test function in this file.
@env_sigill = internal global [1 x %struct.__jmp_buf_tag] zeroinitializer, align 16

@cond = external global i8, align 1
; @foo performs an SJLJ longjmp through @env_sigill. The CHECK lines verify
; the PPC64 lowering: materialize the TOC-relative address of the jmp_buf,
; then reload the frame pointer (r31), target address (-> CTR), stack pointer
; (r1), TOC pointer (r2), and r30 from fixed slots, and branch via bctr.
define void @foo() #0 {
entry:
  call void @llvm.eh.sjlj.longjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8*))
  unreachable

; CHECK: @foo
; CHECK: addis [[REG:[0-9]+]], 2, env_sigill@toc@ha
; CHECK: addi [[REG]], [[REG]], env_sigill@toc@l
; CHECK: ld 31, 0([[REG]])
; CHECK: ld [[REG2:[0-9]+]], 8([[REG]])
; CHECK-DAG: ld 1, 16([[REG]])
; CHECK-DAG: mtctr [[REG2]]
; CHECK-DAG: ld 30, 32([[REG]])
; CHECK-DAG: ld 2, 24([[REG]])
; CHECK: bctr

return:                                           ; No predecessors!
  ret void
}
|
|
|
|
|
|
|
|
; SJLJ longjmp intrinsic; #1 = noreturn nounwind (see attributes below).
declare void @llvm.eh.sjlj.longjmp(i8*) #1
|
|
|
|
|
|
|
|
; @main exercises the SJLJ setjmp side: it stores the frame address and the
; saved stack pointer into @env_sigill, calls llvm.eh.sjlj.setjmp, and takes
; the longjmp path through @foo when setjmp returns 0. The CHECK lines verify
; the setjmp lowering (bcl/mflr dispatch setup, jmp_buf slot stores) and that
; VRSAVE is not saved on non-Darwin targets; CHECK-NOAV verifies no
; Altivec/VSX saves are emitted for a core without those units (a2).
define signext i32 @main() #0 {
entry:
  %retval = alloca i32, align 4
  store i32 0, i32* %retval
  ; Populate the jmp_buf slots the intrinsic lowering expects:
  ; slot 0 = frame address, slot 2 = saved stack pointer.
  %0 = call i8* @llvm.frameaddress(i32 0)
  store i8* %0, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**)
  %1 = call i8* @llvm.stacksave()
  store i8* %1, i8** getelementptr (i8*, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**), i32 2)
  %2 = call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8*))
  %tobool = icmp ne i32 %2, 0
  br i1 %tobool, label %if.then, label %if.else

if.then:                                          ; preds = %entry
  store i32 1, i32* %retval
  br label %return

if.else:                                          ; preds = %entry
  call void @foo()
  br label %if.end

if.end:                                           ; preds = %if.else
  store i32 0, i32* %retval
  br label %return

return:                                           ; preds = %if.end, %if.then
  %3 = load i32, i32* %retval
  ret i32 %3

; FIXME: We should be saving VRSAVE on Darwin, but we're not!

; CHECK-LABEL: main:
; CHECK: std

; Make sure that we're not saving VRSAVE on non-Darwin:
; CHECK-NOT: mfspr

; CHECK-DAG: stfd
; CHECK-DAG: stxvd2x

; CHECK-DAG: addis [[REG:[0-9]+]], 2, env_sigill@toc@ha
; CHECK-DAG: std 31, env_sigill@toc@l([[REG]])
; CHECK-DAG: addi [[REGA:[0-9]+]], [[REG]], env_sigill@toc@l
; CHECK-DAG: std [[REGA]], [[OFF:[0-9]+]](31) # 8-byte Folded Spill
; CHECK-DAG: std 1, 16([[REGA]])
; CHECK-DAG: std 2, 24([[REGA]])

; CHECK: bcl 20, 31, .LBB1_3
; CHECK: li 3, 1
; CHECK: #EH_SjLj_Setup .LBB1_3
; CHECK: # %bb.1:

; CHECK: .LBB1_3:
; CHECK: mflr [[REGL:[0-9]+]]
; CHECK: ld [[REG2:[0-9]+]], [[OFF]](31) # 8-byte Folded Reload
; CHECK: std [[REGL]], 8([[REG2]])
; CHECK: li 3, 0

; CHECK: .LBB1_5:
; CHECK-DAG: lfd
; CHECK-DAG: lxvd2x
; CHECK: ld
; CHECK: blr

; CHECK-NOAV-LABEL: main:
; CHECK-NOAV-NOT: stxvd2x
; CHECK-NOAV: bcl
; CHECK-NOAV: mflr
; CHECK-NOAV: bl foo
; CHECK-NOAV-NOT: lxvd2x
; CHECK-NOAV: blr
}
|
|
|
|
|
2013-07-18 07:50:51 +08:00
|
|
|
; @main2 is the same setjmp pattern as @main, but with an over-aligned
; (align 64) dynamic alloca escaping via @bar, so the function needs a base
; pointer (r30). The CHECK lines verify that r30 is also saved into the
; jmp_buf (slot at offset 32) by the setjmp lowering.
define signext i32 @main2() #0 {
entry:
  %a = alloca i8, align 64
  call void @bar(i8* %a)
  %retval = alloca i32, align 4
  store i32 0, i32* %retval
  ; Populate the jmp_buf slots the intrinsic lowering expects:
  ; slot 0 = frame address, slot 2 = saved stack pointer.
  %0 = call i8* @llvm.frameaddress(i32 0)
  store i8* %0, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**)
  %1 = call i8* @llvm.stacksave()
  store i8* %1, i8** getelementptr (i8*, i8** bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8**), i32 2)
  %2 = call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8*))
  %tobool = icmp ne i32 %2, 0
  br i1 %tobool, label %if.then, label %if.else

if.then:                                          ; preds = %entry
  store i32 1, i32* %retval
  br label %return

if.else:                                          ; preds = %entry
  call void @foo()
  br label %if.end

if.end:                                           ; preds = %if.else
  store i32 0, i32* %retval
  br label %return

return:                                           ; preds = %if.end, %if.then
  %3 = load i32, i32* %retval
  ret i32 %3

; CHECK-LABEL: main2:

; CHECK: addis [[REG:[0-9]+]], 2, env_sigill@toc@ha
; CHECK-DAG: std 31, env_sigill@toc@l([[REG]])
; CHECK-DAG: addi [[REGB:[0-9]+]], [[REG]], env_sigill@toc@l
; CHECK-DAG: std [[REGB]], [[OFF:[0-9]+]](31) # 8-byte Folded Spill
; CHECK-DAG: std 1, 16([[REGB]])
; CHECK-DAG: std 2, 24([[REGB]])
; CHECK-DAG: std 30, 32([[REGB]])
; CHECK: bcl 20, 31,

; CHECK: blr
}
|
|
|
|
|
2018-12-13 20:25:20 +08:00
|
|
|
; Verifies that llvm.eh.sjlj.setjmp is expanded inline rather than lowered
; to a call to the _setjmp library builtin.
define void @test_sjlj_setjmp() #0 {
entry:
  %0 = load i8, i8* @cond, align 1
  %tobool = trunc i8 %0 to i1
  br i1 %tobool, label %return, label %end

end:
  %1 = call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([1 x %struct.__jmp_buf_tag]* @env_sigill to i8*))
  br label %return

return:
  ret void

; CHECK-LABEL: test_sjlj_setjmp:
; intrinsic llvm.eh.sjlj.setjmp does not call builtin function _setjmp.
; CHECK-NOT: bl _setjmp
}
|
|
|
|
|
2013-07-18 07:50:51 +08:00
|
|
|
; External function used to force %a in @main2 to escape.
declare void @bar(i8*) #3

declare i8* @llvm.frameaddress(i32) #2
declare i8* @llvm.stacksave() #3
declare i32 @llvm.eh.sjlj.setjmp(i8*) #3

; "no-frame-pointer-elim-non-leaf" keeps a frame pointer in the non-leaf
; test functions, which the jmp_buf stores above rely on (slot (31)).
attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { noreturn nounwind }
attributes #2 = { nounwind readnone }
attributes #3 = { nounwind }
|
|
|
|
|