[BasicAA] Guard intrinsics don't write to memory
Summary: The idea is very close to what we do for assume intrinsics: we mark
the guard intrinsics as writing to arbitrary memory to maintain control
dependence, but under the covers we teach AA that they do not mod any
particular memory location.

Reviewers: chandlerc, hfinkel, gbiv, reames

Subscribers: george.burgess.iv, mcrosier, llvm-commits

Differential Revision: http://reviews.llvm.org/D19575

llvm-svn: 269007
commit d47f42435a (parent a00b97f780)
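
As a rough sketch of the modelling described in the summary (a self-contained toy, not LLVM's actual AA interfaces): the intrinsic's declared behavior stays maximally conservative so that control dependence is preserved, while the alias-analysis-level answer for any concrete location is refined down to read-only for guards and to no effect at all for assumes.

#include <cassert>

// Toy model (not the LLVM API) of the scheme described in the summary.
enum ModRefInfo { MRI_NoModRef = 0, MRI_Ref = 1, MRI_Mod = 2, MRI_ModRef = 3 };
enum class CallKind { Assume, Guard, OtherCall };

// Declared effect used for ordering / control dependence: both assume and
// guard are declared as arbitrarily writing so nothing is reordered past them.
ModRefInfo declaredEffect(CallKind) { return MRI_ModRef; }

// Refined answer a BasicAA-like analysis gives for any specific location.
ModRefInfo effectOnLocation(CallKind K) {
  switch (K) {
  case CallKind::Assume: return MRI_NoModRef; // touches no location at all
  case CallKind::Guard:  return MRI_Ref;      // may read, but never writes
  default:               return MRI_ModRef;   // unknown call: stay conservative
  }
}

int main() {
  assert(declaredEffect(CallKind::Guard) == MRI_ModRef);
  assert(effectOnLocation(CallKind::Guard) == MRI_Ref);
  assert(effectOnLocation(CallKind::Assume) == MRI_NoModRef);
}
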
@@ -649,9 +649,9 @@ ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
   return AAResultBase::getArgModRefInfo(CS, ArgIdx);
 }
 
-static bool isAssumeIntrinsic(ImmutableCallSite CS) {
+static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
   const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
-  return II && II->getIntrinsicID() == Intrinsic::assume;
+  return II && II->getIntrinsicID() == IID;
 }
 
 #ifndef NDEBUG
@@ -769,9 +769,19 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
   // While the assume intrinsic is marked as arbitrarily writing so that
   // proper control dependencies will be maintained, it never aliases any
   // particular memory location.
-  if (isAssumeIntrinsic(CS))
+  if (isIntrinsicCall(CS, Intrinsic::assume))
     return MRI_NoModRef;
 
+  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
+  // that proper control dependencies are maintained but they never mod any
+  // particular memory location.
+  //
+  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
+  // heap state at the point the guard is issued needs to be consistent in case
+  // the guard invokes the "deopt" continuation.
+  if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
+    return MRI_Ref;
+
   // The AAResultBase base class has some smarts, lets use them.
   return AAResultBase::getModRefInfo(CS, Loc);
 }
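
To see why MRI_Ref (rather than MRI_NoModRef) is the useful refined answer here, consider what a client pass may do across the call. The following is a hedged toy illustration of that trade-off, not code from the patch: a value loaded from a location can still be reused across a guard because the guard never modifies it, but a store to that location cannot be sunk past the guard because the deopt continuation may read it.

// Toy client-side view (not LLVM code) of what Ref vs. NoModRef permits.
enum ModRefInfo { MRI_NoModRef = 0, MRI_Ref = 1, MRI_Mod = 2, MRI_ModRef = 3 };

// Reusing a loaded value across a call only requires that the call does not
// write the location.
bool canForwardLoadAcross(ModRefInfo CallEffectOnLoc) {
  return (CallEffectOnLoc & MRI_Mod) == 0;
}

// Sinking a store past a call requires that the call neither reads nor
// writes the location.
bool canSinkStoreAcross(ModRefInfo CallEffectOnLoc) {
  return CallEffectOnLoc == MRI_NoModRef;
}

int main() {
  const ModRefInfo GuardOnLoc = MRI_Ref; // the answer produced by the hunk above
  // Loads can be forwarded across the guard, stores cannot be sunk past it.
  return (canForwardLoadAcross(GuardOnLoc) && !canSinkStoreAcross(GuardOnLoc)) ? 0 : 1;
}
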
@@ -781,9 +791,27 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
   // While the assume intrinsic is marked as arbitrarily writing so that
   // proper control dependencies will be maintained, it never aliases any
   // particular memory location.
-  if (isAssumeIntrinsic(CS1) || isAssumeIntrinsic(CS2))
+  if (isIntrinsicCall(CS1, Intrinsic::assume) ||
+      isIntrinsicCall(CS2, Intrinsic::assume))
     return MRI_NoModRef;
 
+  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
+  // that proper control dependencies are maintained but they never mod any
+  // particular memory location.
+  //
+  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
+  // heap state at the point the guard is issued needs to be consistent in case
+  // the guard invokes the "deopt" continuation.
+
+  // NB! This function is *not* commutative, so we special case two
+  // possibilities for guard intrinsics.
+
+  if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
+    return getModRefBehavior(CS2) & MRI_Mod ? MRI_Ref : MRI_NoModRef;
+
+  if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
+    return getModRefBehavior(CS1) & MRI_Mod ? MRI_Mod : MRI_NoModRef;
+
   // The AAResultBase base class has some smarts, lets use them.
   return AAResultBase::getModRefInfo(CS1, CS2);
 }
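
The call-site-versus-call-site query asks "what does CS1 do to the memory CS2 accesses?", which is why the two guard cases above are handled separately rather than symmetrically. Below is a small self-contained sketch of that shape; it is a toy, not the BasicAA implementation, and in the real code the other operand's summary comes from getModRefBehavior.

// Toy model of the asymmetric logic above: the result describes what CS1 may
// do to memory accessed by CS2, so swapping the operands changes the answer.
enum ModRefInfo { MRI_NoModRef = 0, MRI_Ref = 1, MRI_Mod = 2, MRI_ModRef = 3 };

struct ToyCall {
  bool IsGuard;
  ModRefInfo Summary; // conservative summary of what the call itself may do
};

ModRefInfo guardAwareModRef(const ToyCall &CS1, const ToyCall &CS2) {
  if (CS1.IsGuard) // a guard can only read memory that CS2 writes
    return (CS2.Summary & MRI_Mod) ? MRI_Ref : MRI_NoModRef;
  if (CS2.IsGuard) // CS1 can only write memory that the guard reads
    return (CS1.Summary & MRI_Mod) ? MRI_Mod : MRI_NoModRef;
  return MRI_ModRef; // otherwise this toy stays fully conservative
}

int main() {
  ToyCall Guard{true, MRI_Ref}, Memcpy{false, MRI_ModRef}, ReadOnly{false, MRI_Ref};
  // Mirrors the expectations in the test below: "Just Ref" one way and
  // "Just Mod" the other against a memcpy, and NoModRef both ways against a
  // readonly call.
  return (guardAwareModRef(Guard, Memcpy) == MRI_Ref &&
          guardAwareModRef(Memcpy, Guard) == MRI_Mod &&
          guardAwareModRef(Guard, ReadOnly) == MRI_NoModRef &&
          guardAwareModRef(ReadOnly, Guard) == MRI_NoModRef)
             ? 0
             : 1;
}
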
@@ -0,0 +1,30 @@
+; RUN: opt < %s -basicaa -aa-eval -print-all-alias-modref-info -disable-output 2>&1 | FileCheck %s
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) #0
+declare void @llvm.experimental.guard(i1, ...)
+declare void @unknown_but_readonly() readonly
+
+define void @test1(i8* %P, i8* %Q) {
+  tail call void(i1,...) @llvm.experimental.guard(i1 true) [ "deopt"() ]
+  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+  ret void
+
+; CHECK-LABEL: Function: test1:
+
+; CHECK: Just Ref: Ptr: i8* %P <-> tail call void (i1, ...) @llvm.experimental.guard(i1 true) [ "deopt"() ]
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void (i1, ...) @llvm.experimental.guard(i1 true) [ "deopt"() ]
+; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: tail call void (i1, ...) @llvm.experimental.guard(i1 true) [ "deopt"() ] <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void (i1, ...) @llvm.experimental.guard(i1 true) [ "deopt"() ]
+}
+
+define void @test2() {
+  tail call void(i1,...) @llvm.experimental.guard(i1 true) [ "deopt"() ]
+  tail call void @unknown_but_readonly()
+  ret void
+; CHECK-LABEL: Function: test2:
+; CHECK: NoModRef: tail call void (i1, ...) @llvm.experimental.guard(i1 true) [ "deopt"() ] <-> tail call void @unknown_but_readonly()
+; CHECK: NoModRef: tail call void @unknown_but_readonly() <-> tail call void (i1, ...) @llvm.experimental.guard(i1 true) [ "deopt"() ]
+}