; RUN: opt -basic-aa -print-memoryssa -verify-memoryssa -enable-new-pm=0 -analyze < %s 2>&1 | FileCheck %s
; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
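
; All of the functions below contain a loop through bb26 and bb77 whose
; MemoryPhis refer to one another, so this file exercises MemorySSA
; construction, use optimization, and verification in the presence of
; cyclic phis.
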
%struct.hoge = type { i32, %struct.widget }
%struct.widget = type { i64 }
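
; @quux: the load from null in bb68 is expected to fold to
; MemoryUse(liveOnEntry), while the load of %tmp25 in bb77 is clobbered by
; the cyclic MemoryPhi (2); compare @quux_no_null_opt below.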
define hidden void @quux(%struct.hoge *%f) align 2 {
  %tmp = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1, i32 0
  %tmp24 = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1
  %tmp25 = bitcast %struct.widget* %tmp24 to i64**
  br label %bb26

bb26: ; preds = %bb77, %0
; CHECK: 3 = MemoryPhi({%0,liveOnEntry},{bb77,2})
; CHECK-NEXT: br i1 undef, label %bb68, label %bb77
  br i1 undef, label %bb68, label %bb77

bb68: ; preds = %bb26
; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: %tmp69 = load i64, i64* null, align 8
  %tmp69 = load i64, i64* null, align 8
; CHECK: 1 = MemoryDef(3)
; CHECK-NEXT: store i64 %tmp69, i64* %tmp, align 8
  store i64 %tmp69, i64* %tmp, align 8
  br label %bb77

bb77: ; preds = %bb68, %bb26
; CHECK: 2 = MemoryPhi({bb26,3},{bb68,1})
; CHECK: MemoryUse(2)
; CHECK-NEXT: %tmp78 = load i64*, i64** %tmp25, align 8
  %tmp78 = load i64*, i64** %tmp25, align 8
  %tmp79 = getelementptr inbounds i64, i64* %tmp78, i64 undef
  br label %bb26
}
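
; Same body as @quux, but with the null_pointer_is_valid attribute (#0), so
; the load from null in bb68 can no longer be treated as liveOnEntry and is
; expected to be MemoryUse(3) instead.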
define hidden void @quux_no_null_opt(%struct.hoge *%f) align 2 #0 {
; CHECK-LABEL: quux_no_null_opt(
  %tmp = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1, i32 0
  %tmp24 = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1
  %tmp25 = bitcast %struct.widget* %tmp24 to i64**
  br label %bb26

bb26: ; preds = %bb77, %0
; CHECK: 3 = MemoryPhi({%0,liveOnEntry},{bb77,2})
; CHECK-NEXT: br i1 undef, label %bb68, label %bb77
  br i1 undef, label %bb68, label %bb77

bb68: ; preds = %bb26
; CHECK: MemoryUse(3)
; CHECK-NEXT: %tmp69 = load i64, i64* null, align 8
  %tmp69 = load i64, i64* null, align 8
; CHECK: 1 = MemoryDef(3)
; CHECK-NEXT: store i64 %tmp69, i64* %tmp, align 8
  store i64 %tmp69, i64* %tmp, align 8
  br label %bb77

bb77: ; preds = %bb68, %bb26
; CHECK: 2 = MemoryPhi({bb26,3},{bb68,1})
; CHECK: MemoryUse(2)
; CHECK-NEXT: %tmp78 = load i64*, i64** %tmp25, align 8
  %tmp78 = load i64*, i64** %tmp25, align 8
  %tmp79 = getelementptr inbounds i64, i64* %tmp78, i64 undef
  br label %bb26
}
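
; @quux_skip: %f and %g are noalias, so the store to %g inside the loop does
; not clobber %tmp25, and the load of %tmp25 in bb77 is expected to be
; optimized all the way to liveOnEntry despite the cyclic MemoryPhis.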
; CHECK-LABEL: define void @quux_skip
define void @quux_skip(%struct.hoge* noalias %f, i64* noalias %g) align 2 {
  %tmp = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1, i32 0
  %tmp24 = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1
  %tmp25 = bitcast %struct.widget* %tmp24 to i64**
  br label %bb26

bb26: ; preds = %bb77, %0
; CHECK: 3 = MemoryPhi({%0,liveOnEntry},{bb77,2})
; CHECK-NEXT: br i1 undef, label %bb68, label %bb77
  br i1 undef, label %bb68, label %bb77

bb68: ; preds = %bb26
; CHECK: MemoryUse(3)
; CHECK-NEXT: %tmp69 = load i64, i64* %g, align 8
  %tmp69 = load i64, i64* %g, align 8
; CHECK: 1 = MemoryDef(3)
; CHECK-NEXT: store i64 %tmp69, i64* %g, align 8
  store i64 %tmp69, i64* %g, align 8
  br label %bb77

bb77: ; preds = %bb68, %bb26
; CHECK: 2 = MemoryPhi({bb26,3},{bb68,1})
; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: %tmp78 = load i64*, i64** %tmp25, align 8
  %tmp78 = load i64*, i64** %tmp25, align 8
  br label %bb26
}
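
; @quux_dominated: like @quux_skip, but bb77 also stores to %tmp25, so the
; load of %tmp25 in bb26 is only expected to be optimized up to the
; MemoryPhi (3), not to liveOnEntry.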
; CHECK-LABEL: define void @quux_dominated
define void @quux_dominated(%struct.hoge* noalias %f, i64* noalias %g) align 2 {
  %tmp = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1, i32 0
  %tmp24 = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1
  %tmp25 = bitcast %struct.widget* %tmp24 to i64**
  br label %bb26

bb26: ; preds = %bb77, %0
; CHECK: 3 = MemoryPhi({%0,liveOnEntry},{bb77,2})
; CHECK: MemoryUse(3)
; CHECK-NEXT: load i64*, i64** %tmp25, align 8
  load i64*, i64** %tmp25, align 8
  br i1 undef, label %bb68, label %bb77

bb68: ; preds = %bb26
; CHECK: MemoryUse(3)
; CHECK-NEXT: %tmp69 = load i64, i64* %g, align 8
  %tmp69 = load i64, i64* %g, align 8
; CHECK: 1 = MemoryDef(3)
; CHECK-NEXT: store i64 %tmp69, i64* %g, align 8
  store i64 %tmp69, i64* %g, align 8
  br label %bb77

bb77: ; preds = %bb68, %bb26
; CHECK: 4 = MemoryPhi({bb26,3},{bb68,1})
; CHECK: 2 = MemoryDef(4)
; CHECK-NEXT: store i64* null, i64** %tmp25, align 8
  store i64* null, i64** %tmp25, align 8
  br label %bb26
}
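
; @quux_nodominate: like @quux_dominated, but without the store to %tmp25,
; so the load of %tmp25 in bb26 is expected to be liveOnEntry again.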
; CHECK-LABEL: define void @quux_nodominate
define void @quux_nodominate(%struct.hoge* noalias %f, i64* noalias %g) align 2 {
  %tmp = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1, i32 0
  %tmp24 = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1
  %tmp25 = bitcast %struct.widget* %tmp24 to i64**
  br label %bb26

bb26: ; preds = %bb77, %0
; CHECK: 3 = MemoryPhi({%0,liveOnEntry},{bb77,2})
; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: load i64*, i64** %tmp25, align 8
  load i64*, i64** %tmp25, align 8
  br i1 undef, label %bb68, label %bb77

bb68: ; preds = %bb26
; CHECK: MemoryUse(3)
; CHECK-NEXT: %tmp69 = load i64, i64* %g, align 8
  %tmp69 = load i64, i64* %g, align 8
; CHECK: 1 = MemoryDef(3)
; CHECK-NEXT: store i64 %tmp69, i64* %g, align 8
  store i64 %tmp69, i64* %g, align 8
  br label %bb77

bb77: ; preds = %bb68, %bb26
; CHECK: 2 = MemoryPhi({bb26,3},{bb68,1})
; CHECK-NEXT: br label %bb26
  br label %bb26
}
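
; null_pointer_is_valid marks @quux_no_null_opt as a function in which a null
; pointer may be a valid address, which is why its load from null is not
; folded to liveOnEntry above.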
attributes #0 = { null_pointer_is_valid }