 lib/Analysis/BasicAliasAnalysis.cpp | 36 ++++++++++++++++++++++++++++++++----
 test/Analysis/BasicAA/guards.ll     | 30 ++++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+), 4 deletions(-)
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index 14d92d633b9..0b5c85813b4 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -649,9 +649,9 @@ ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
return AAResultBase::getArgModRefInfo(CS, ArgIdx);
}
-static bool isAssumeIntrinsic(ImmutableCallSite CS) {
+static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
- return II && II->getIntrinsicID() == Intrinsic::assume;
+ return II && II->getIntrinsicID() == IID;
}
#ifndef NDEBUG
@@ -769,9 +769,19 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
// While the assume intrinsic is marked as arbitrarily writing so that
// proper control dependencies will be maintained, it never aliases any
// particular memory location.
- if (isAssumeIntrinsic(CS))
+ if (isIntrinsicCall(CS, Intrinsic::assume))
return MRI_NoModRef;
+ // Like assumes, guard intrinsics are also marked as arbitrarily writing so
+ // that proper control dependencies are maintained but they never mod any
+ // particular memory location.
+ //
+ // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
+ // heap state at the point the guard is issued needs to be consistent in case
+ // the guard invokes the "deopt" continuation.
+ if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
+ return MRI_Ref;
+
// The AAResultBase base class has some smarts, lets use them.
return AAResultBase::getModRefInfo(CS, Loc);
}
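To make the new MRI_Ref answer concrete, here is a minimal, hypothetical client-side sketch; it is not part of this patch, the helper names are invented for illustration, and it assumes the AAResults/MemoryLocation query interfaces as of this revision. It only covers the aliasing half of legality: a guard never mods a location, so a load's value is unaffected by it, but because the guard is modeled as reading every location, a store may not be sunk past it.

// Hypothetical client-side sketch, not part of this patch; helper names are
// invented for illustration.
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// True when the call does not write the loaded location; for a guard this is
// now always the case, since BasicAA answers MRI_Ref at most.
static bool callDoesNotClobberLoad(AAResults &AA, const LoadInst *LI,
                                   ImmutableCallSite CS) {
  return !(AA.getModRefInfo(CS, MemoryLocation::get(LI)) & MRI_Mod);
}

// True only when the call neither reads nor writes the stored-to location;
// for a guard this stays false, keeping stores ordered before the "deopt"
// state the guard may consume.
static bool callIgnoresStore(AAResults &AA, const StoreInst *SI,
                             ImmutableCallSite CS) {
  return AA.getModRefInfo(CS, MemoryLocation::get(SI)) == MRI_NoModRef;
}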
@@ -781,9 +791,27 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
// While the assume intrinsic is marked as arbitrarily writing so that
// proper control dependencies will be maintained, it never aliases any
// particular memory location.
- if (isAssumeIntrinsic(CS1) || isAssumeIntrinsic(CS2))
+ if (isIntrinsicCall(CS1, Intrinsic::assume) ||
+ isIntrinsicCall(CS2, Intrinsic::assume))
return MRI_NoModRef;
+ // Like assumes, guard intrinsics are also marked as arbitrarily writing so
+ // that proper control dependencies are maintained but they never mod any
+ // particular memory location.
+ //
+ // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
+ // heap state at the point the guard is issued needs to be consistent in case
+ // the guard invokes the "deopt" continuation.
+
+ // NB! This function is *not* commutative, so we special-case two
+ // possibilities for guard intrinsics.
+
+ if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
+ return getModRefBehavior(CS2) & MRI_Mod ? MRI_Ref : MRI_NoModRef;
+
+ if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
+ return getModRefBehavior(CS1) & MRI_Mod ? MRI_Mod : MRI_NoModRef;
+
// The AAResultBase base class has some smarts, lets use them.
return AAResultBase::getModRefInfo(CS1, CS2);
}
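Because the call-vs-call query is not commutative, it may help to trace the two new branches against the cases exercised by the test added below. The following standalone sketch is plain C++, not LLVM code: it simply mirrors the MRI_* bit values and the two ternaries above, reducing the real FunctionModRefBehavior of the non-guard call to its mod/ref bits.

#include <cassert>

// Mirror of the ModRefInfo bits used by BasicAA (Ref and Mod are independent
// flags; ModRef is both).
enum ModRefInfo { MRI_NoModRef = 0, MRI_Ref = 1, MRI_Mod = 2, MRI_ModRef = 3 };

// getModRefInfo(Guard, Other): the guard only reads, so it refs anything that
// mods memory and is disjoint from anything that does not.
static ModRefInfo guardVsOther(ModRefInfo OtherBehavior) {
  return OtherBehavior & MRI_Mod ? MRI_Ref : MRI_NoModRef;
}

// getModRefInfo(Other, Guard): a call that mods memory may clobber state the
// guard reads, so the answer is Mod; otherwise there is no interaction.
static ModRefInfo otherVsGuard(ModRefInfo OtherBehavior) {
  return OtherBehavior & MRI_Mod ? MRI_Mod : MRI_NoModRef;
}

int main() {
  // test1 below: the memcpy both reads and writes.
  assert(guardVsOther(MRI_ModRef) == MRI_Ref);   // "Just Ref"
  assert(otherVsGuard(MRI_ModRef) == MRI_Mod);   // "Just Mod"

  // test2 below: the readonly callee never writes.
  assert(guardVsOther(MRI_Ref) == MRI_NoModRef); // "NoModRef"
  assert(otherVsGuard(MRI_Ref) == MRI_NoModRef); // "NoModRef"
  return 0;
}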
diff --git a/test/Analysis/BasicAA/guards.ll b/test/Analysis/BasicAA/guards.ll
new file mode 100644
index 00000000000..66cfb156b7d
--- /dev/null
+++ b/test/Analysis/BasicAA/guards.ll
@@ -0,0 +1,30 @@
+; RUN: opt < %s -basicaa -aa-eval -print-all-alias-modref-info -disable-output 2>&1 | FileCheck %s
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) #0
+declare void @llvm.experimental.guard(i1, ...)
+declare void @unknown_but_readonly() readonly
+
+define void @test1(i8* %P, i8* %Q) {
+ tail call void(i1,...) @llvm.experimental.guard(i1 true) [ "deopt"() ]
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+ ret void
+
+; CHECK-LABEL: Function: test1:
+
+; CHECK: Just Ref: Ptr: i8* %P <-> tail call void (i1, ...) @llvm.experimental.guard(i1 true) [ "deopt"() ]
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void (i1, ...) @llvm.experimental.guard(i1 true) [ "deopt"() ]
+; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Ref: tail call void (i1, ...) @llvm.experimental.guard(i1 true) [ "deopt"() ] <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
+; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void (i1, ...) @llvm.experimental.guard(i1 true) [ "deopt"() ]
+}
+
+define void @test2() {
+ tail call void(i1,...) @llvm.experimental.guard(i1 true) [ "deopt"() ]
+ tail call void @unknown_but_readonly()
+ ret void
+; CHECK-LABEL: Function: test2:
+; CHECK: NoModRef: tail call void (i1, ...) @llvm.experimental.guard(i1 true) [ "deopt"() ] <-> tail call void @unknown_but_readonly()
+; CHECK: NoModRef: tail call void @unknown_but_readonly() <-> tail call void (i1, ...) @llvm.experimental.guard(i1 true) [ "deopt"() ]
+}