CoroSplit: Fix coroutine splitting for retcon and retcon.once
Summary:
For retcon and retcon.once coroutines we assume that all uses of spills
can be sunk past coro.begin. This simplifies the handling of instructions
that escape the address of an alloca.

The current implementation would have issues if the address of the alloca
escaped before coro.begin. (It also has issues with casts that appear
before coro.begin and uses of those casts after it.)

  %alloca_addr = alloca ...
  %escape = ptrtoint %alloca_addr
  coro.begin
  store %escape to %alloca_addr

rdar://60272809

Subscribers: hiraditya, modocache, mgrang, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D81023
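The new sinkSpillUsesAfterCoroBegin moves such pre-coro.begin uses (and
their transitive users) to just after coro.begin, in dominance order, so
spill rewriting sees them. On the example above the intended result is
roughly the following; this is an illustrative sketch in the same
pseudo-IR, not verbatim pass output:

  %alloca_addr = alloca ...
  coro.begin
  ; the escaping use now follows coro.begin, so insertSpills can
  ; rewrite %alloca_addr to its frame slot without losing the store
  %escape = ptrtoint %alloca_addr
  store %escape to %alloca_addr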
@@ -899,6 +899,23 @@ static Instruction *insertSpills(const SpillInfo &Spills, coro::Shape &Shape) {
  auto SpillBlock =
      FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
  SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
  Shape.AllocaSpillBlock = SpillBlock;

  // retcon and retcon.once lowering assumes all uses have been sunk.
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce) {
    // If we found any allocas, replace all of their remaining uses with GEPs.
    Builder.SetInsertPoint(&SpillBlock->front());
    for (auto &P : Allocas) {
      auto *G = GetFramePointer(P.second, P.first);

      // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing the location of the instruction.
      G->takeName(P.first);
      P.first->replaceAllUsesWith(G);
      P.first->eraseFromParent();
    }
    return FramePtr;
  }

  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. Because new dbg.declares have been created for these
  // allocas, we also delete the original dbg.declares and replace other
  // uses with undef.
@@ -1482,6 +1499,55 @@ static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
  }
}

/// retcon and retcon.once conventions assume that all spill uses can be
/// sunk after the coro.begin intrinsic.
static void sinkSpillUsesAfterCoroBegin(Function &F, const SpillInfo &Spills,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree Dom(F);

  SmallSetVector<Instruction *, 32> ToMove;
  SmallVector<Instruction *, 32> Worklist;

  // Collect all users that precede coro.begin.
  for (auto const &Entry : Spills) {
    auto *SpillDef = Entry.def();
    for (User *U : SpillDef->users()) {
      auto *Inst = cast<Instruction>(U);
      if (Inst->getParent() != CoroBegin->getParent() ||
          Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }
  // Recursively collect users before coro.begin.
  while (!Worklist.empty()) {
    auto *Def = Worklist.back();
    Worklist.pop_back();
    for (User *U : Def->users()) {
      auto *Inst = cast<Instruction>(U);
      if (Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }

  // Sort by dominance.
  SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
  std::sort(InsertionList.begin(), InsertionList.end(),
            [&Dom](Instruction *A, Instruction *B) -> bool {
              // If A dominates B, it should precede (<) B.
              return Dom.dominates(A, B);
            });

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *Inst : InsertionList)
    Inst->moveBefore(InsertPt);
}

void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  eliminateSwiftError(F, Shape);
@@ -1618,6 +1684,8 @@ void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
    }
  }
  LLVM_DEBUG(dump("Spills", Spills));
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce)
    sinkSpillUsesAfterCoroBegin(F, Spills, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, Spills);
  Shape.FramePtr = insertSpills(Spills, Shape);
  lowerLocalAllocas(LocalAllocas, DeadInstructions);
@@ -0,0 +1,63 @@
; RUN: opt < %s -coro-split -S | FileCheck %s

target datalayout = "p:64:64:64"

declare void @prototype_f(i8*, i1)

declare noalias i8* @allocate(i32 %size)
declare void @deallocate(i8* %ptr)
declare void @init(i64* %ptr)
declare void @use(i8* %ptr)
declare void @use_addr_val(i64 %val, { i64, i64 }* %addr)

define { i8*, { i64, i64 }* } @f(i8* %buffer) "coroutine.presplit"="1" {
entry:
  %tmp = alloca { i64, i64 }, align 8
  %proj.1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %tmp, i64 0, i32 0
  %proj.2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %tmp, i64 0, i32 1
  store i64 0, i64* %proj.1, align 8
  store i64 0, i64* %proj.2, align 8
  %cast = bitcast { i64, i64 }* %tmp to i8*
  %escape_addr = ptrtoint { i64, i64 }* %tmp to i64
  %id = call token @llvm.coro.id.retcon.once(i32 32, i32 8, i8* %buffer, i8* bitcast (void (i8*, i1)* @prototype_f to i8*), i8* bitcast (i8* (i32)* @allocate to i8*), i8* bitcast (void (i8*)* @deallocate to i8*))
  %hdl = call i8* @llvm.coro.begin(token %id, i8* null)
  %proj.2.2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %tmp, i64 0, i32 1
  call void @init(i64* %proj.1)
  call void @init(i64* %proj.2.2)
  call void @use_addr_val(i64 %escape_addr, { i64, i64 }* %tmp)
  %abort = call i1 (...) @llvm.coro.suspend.retcon.i1({ i64, i64 }* %tmp)
  br i1 %abort, label %end, label %resume

resume:
  call void @use(i8* %cast)
  br label %end

end:
  call i1 @llvm.coro.end(i8* %hdl, i1 0)
  unreachable
}

; Make sure we don't lose writes to the frame.
; CHECK-LABEL: define { i8*, { i64, i64 }* } @f(i8* %buffer) {
; CHECK:   [[FRAMEPTR:%.*]] = bitcast i8* %buffer to %f.Frame*
; CHECK:   [[TMP:%.*]] = getelementptr inbounds %f.Frame, %f.Frame* [[FRAMEPTR]], i32 0, i32 0
; CHECK:   [[PROJ1:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP]], i64 0, i32 0
; CHECK:   [[PROJ2:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP]], i64 0, i32 1
; CHECK:   store i64 0, i64* [[PROJ1]]
; CHECK:   store i64 0, i64* [[PROJ2]]
; CHECK:   [[ESCAPED_ADDR:%.*]] = ptrtoint { i64, i64 }* [[TMP]] to i64
; CHECK:   call void @init(i64* [[PROJ1]])
; CHECK:   call void @init(i64* [[PROJ2]])
; CHECK:   call void @use_addr_val(i64 [[ESCAPED_ADDR]], { i64, i64 }* [[TMP]])

; CHECK-LABEL: define internal void @f.resume.0(i8* {{.*}} %0, i1 %1) {
; CHECK:   [[FRAMEPTR:%.*]] = bitcast i8* %0 to %f.Frame*
; CHECK: resume:
; CHECK:   [[TMP:%.*]] = getelementptr inbounds %f.Frame, %f.Frame* [[FRAMEPTR]], i32 0, i32 0
; CHECK:   [[CAST:%.*]] = bitcast { i64, i64 }* [[TMP]] to i8*
; CHECK:   call void @use(i8* [[CAST]])

declare token @llvm.coro.id.retcon.once(i32, i32, i8*, i8*, i8*, i8*)
declare i8* @llvm.coro.begin(token, i8*)
declare i1 @llvm.coro.suspend.retcon.i1(...)
declare i1 @llvm.coro.end(i8*, i1)