[Coroutines][2/6] New pass manager: coro-split
Summary:

This patch has four dependencies:

1. The first in this series of patches that implement coroutine passes in the
   new pass manager: https://reviews.llvm.org/D71898.
2. A patch that introduces an API for CGSCC passes to add new reference edges
   to a `LazyCallGraph`, `updateCGAndAnalysisManagerForCGSCCPass`:
   https://reviews.llvm.org/D72025.
3. A patch that introduces a `CallGraphUpdater` helper class that is capable
   of mutating internal `LazyCallGraph` state in order to insert new function
   nodes into a specific SCC: https://reviews.llvm.org/D70927.
4. And finally, a small edge case fix for updating `LazyCallGraph` that
   patch 3 above happens to run into: https://reviews.llvm.org/D72226.

This is the second in a series of patches that ports the LLVM coroutines
passes to the new pass manager infrastructure. This patch implements
'coro-split'.

Some notes:

* Using the new CGSCC pass manager resulted in IR being printed in the
  reverse order in some tests. To prevent FileCheck checks from failing due
  to these reversed orders, this patch splits up test files that test
  multiple different coroutine functions: specifically
  coro-alloc-with-param.ll, coro-split-eh.ll, and coro-eh-aware-edge-split.ll.
* CoroSplit.cpp contained 2 overloads of `splitCoroutine`, one of which
  dispatched to the other based on the coroutine ABI being used (C++20
  switch-based versus Swift returned-continuation-based). I found this
  confusing, especially with the additional branching based on `CallGraph`
  vs. `LazyCallGraph`, so I removed the ABI-checking overload of
  `splitCoroutine`.

Reviewers: GorNishanov, lewissbaker, chandlerc, jdoerfert, junparser, deadalnix, wenlei

Reviewed By: wenlei

Subscribers: wenlei, qcolombet, EricWF, hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D71899
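As a usage note, the registration this patch adds means the pass can be
exercised with `opt -passes=coro-split`, as the updated RUN lines below do.
A minimal C++ sketch of the equivalent programmatic entry point,
`PassBuilder::parsePassPipeline` (standard new-PM API; error handling reduced
to a fatal error for brevity):

    #include "llvm/Passes/PassBuilder.h"
    #include "llvm/Support/ErrorHandling.h"
    using namespace llvm;

    ModulePassManager buildCoroSplitPipeline() {
      PassBuilder PB;
      ModulePassManager MPM;
      // "cgscc(coro-split)" nests the CGSCC pass in a module pipeline,
      // mirroring the CGSCC_PASS("coro-split", CoroSplitPass()) registration
      // this patch adds to PassRegistry.def.
      if (Error Err = PB.parsePassPipeline(MPM, "cgscc(coro-split)"))
        report_fatal_error("failed to parse pipeline");
      return MPM;
    }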
This commit is contained in: parent ccad194861, commit 7125d66f99
@@ -0,0 +1,30 @@
//===- CoroSplit.h - Converts a coroutine into a state machine -*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file declares the pass that builds the coroutine frame and outlines
// the resume and destroy parts of the coroutine into separate functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_COROUTINES_COROSPLIT_H
#define LLVM_TRANSFORMS_COROUTINES_COROSPLIT_H

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

struct CoroSplitPass : PassInfoMixin<CoroSplitPass> {
  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_COROUTINES_COROSPLIT_H
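For orientation, `CoroSplitPass` plugs into the new pass manager like any
other CGSCC pass. A minimal sketch of running it over a `Module M` (standard
analysis-manager boilerplate, not part of this patch):

    LoopAnalysisManager LAM;
    FunctionAnalysisManager FAM;
    CGSCCAnalysisManager CGAM;
    ModuleAnalysisManager MAM;

    PassBuilder PB;
    PB.registerModuleAnalyses(MAM);
    PB.registerCGSCCAnalyses(CGAM);
    PB.registerFunctionAnalyses(FAM);
    PB.registerLoopAnalyses(LAM);
    PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

    ModulePassManager MPM;
    // The adaptor walks the call graph's SCCs in post order and hands each
    // one to CoroSplitPass::run along with the LazyCallGraph and the
    // CGSCCUpdateResult used to report graph mutations.
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(CoroSplitPass()));
    MPM.run(M, MAM);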
@@ -68,6 +68,7 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
#include "llvm/Transforms/Coroutines/CoroEarly.h"
#include "llvm/Transforms/Coroutines/CoroSplit.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/IPO/Attributor.h"
@@ -111,6 +111,7 @@ CGSCC_PASS("function-attrs", PostOrderFunctionAttrsPass())
CGSCC_PASS("attributor-cgscc", AttributorCGSCCPass())
CGSCC_PASS("inline", InlinerPass())
CGSCC_PASS("openmpopt", OpenMPOptPass())
CGSCC_PASS("coro-split", CoroSplitPass())
CGSCC_PASS("no-op-cgscc", NoOpCGSCCPass())
#undef CGSCC_PASS
@@ -18,6 +18,7 @@
// coroutine.
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Coroutines/CoroSplit.h"
#include "CoroInstr.h"
#include "CoroInternal.h"
#include "llvm/ADT/DenseMap.h"
@@ -59,6 +60,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/CallGraphUpdater.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
@@ -1342,19 +1344,8 @@ namespace {
};
}

static void splitCoroutine(Function &F, coro::Shape &Shape,
                           SmallVectorImpl<Function *> &Clones) {
  switch (Shape.ABI) {
  case coro::ABI::Switch:
    return splitSwitchCoroutine(F, Shape, Clones);
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    return splitRetconCoroutine(F, Shape, Clones);
  }
  llvm_unreachable("bad ABI kind");
}

static void splitCoroutine(Function &F, CallGraph &CG, CallGraphSCC &SCC) {
static coro::Shape splitCoroutine(Function &F,
                                  SmallVectorImpl<Function *> &Clones) {
  PrettyStackTraceFunction prettyStackTrace(F);

  // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
@@ -1363,26 +1354,42 @@ static void splitCoroutine(Function &F, CallGraph &CG, CallGraphSCC &SCC) {

  coro::Shape Shape(F);
  if (!Shape.CoroBegin)
    return;
    return Shape;

  simplifySuspendPoints(Shape);
  buildCoroutineFrame(F, Shape);
  replaceFrameSize(Shape);

  SmallVector<Function*, 4> Clones;

  // If there are no suspend points, no split required, just remove
  // the allocation and deallocation blocks, they are not needed.
  if (Shape.CoroSuspends.empty()) {
    handleNoSuspendCoroutine(Shape);
  } else {
    splitCoroutine(F, Shape, Clones);
    switch (Shape.ABI) {
    case coro::ABI::Switch:
      splitSwitchCoroutine(F, Shape, Clones);
      break;
    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      splitRetconCoroutine(F, Shape, Clones);
      break;
    }
  }

  // Replace all the swifterror operations in the original function.
  // This invalidates SwiftErrorOps in the Shape.
  replaceSwiftErrorOps(F, Shape, nullptr);

  return Shape;
}

static void
updateCallGraphAfterCoroutineSplit(Function &F, const coro::Shape &Shape,
                                   const SmallVectorImpl<Function *> &Clones,
                                   CallGraph &CG, CallGraphSCC &SCC) {
  if (!Shape.CoroBegin)
    return;

  removeCoroEnds(Shape, &CG);
  postSplitCleanup(F);
@@ -1390,6 +1397,43 @@ static void splitCoroutine(Function &F, CallGraph &CG, CallGraphSCC &SCC) {
  coro::updateCallGraph(F, Clones, CG, SCC);
}

static void updateCallGraphAfterCoroutineSplit(
    LazyCallGraph::Node &N, const coro::Shape &Shape,
    const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
    LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR) {
  if (!Shape.CoroBegin)
    return;

  for (llvm::CoroEndInst *End : Shape.CoroEnds) {
    auto &Context = End->getContext();
    End->replaceAllUsesWith(ConstantInt::getFalse(Context));
    End->eraseFromParent();
  }

  postSplitCleanup(N.getFunction());

  // To insert the newly created coroutine funclets 'f.resume', 'f.destroy',
  // and 'f.cleanup' into the same SCC as the coroutine 'f' they were outlined
  // from, we use LazyCallGraph::addNewFunctionIntoRefSCC, which can modify the
  // internal state of the LazyCallGraph.
  for (Function *Clone : Clones)
    CG.addNewFunctionIntoRefSCC(*Clone, C.getOuterRefSCC());

  // We've inserted instructions into coroutine 'f' that reference the three new
  // coroutine funclets. We must now update the call graph so that reference
  // edges between 'f' and its funclets are added to it. LazyCallGraph only
  // allows CGSCC passes to insert "trivial" reference edges. We've ensured
  // above, by inserting the funclets into the same SCC as the coroutine, that
  // the edges are trivial.
  //
  // N.B.: If we didn't update the call graph here, a CGSCCToFunctionPassAdaptor
  // later in this CGSCC pass pipeline may be run, triggering a call graph
  // update of its own. Function passes run by the adaptor are not permitted to
  // add new edges of any kind to the graph, and the new edges inserted by this
  // pass would be misattributed to that unrelated function pass.
  updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR);
}

// When we see the coroutine the first time, we insert an indirect call to a
// devirt trigger function and mark the coroutine that it is now ready for
// split.
@@ -1521,12 +1565,86 @@ static bool replaceAllPrepares(Function *PrepareFn, CallGraph &CG) {
  return Changed;
}

//===----------------------------------------------------------------------===//
// Top Level Driver
//===----------------------------------------------------------------------===//
static bool declaresCoroSplitIntrinsics(const Module &M) {
  return coro::declaresIntrinsics(
      M, {"llvm.coro.begin", "llvm.coro.prepare.retcon"});
}

PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
                                     CGSCCAnalysisManager &AM,
                                     LazyCallGraph &CG, CGSCCUpdateResult &UR) {
  // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
  // non-zero number of nodes, so we assume that here and grab the first
  // node's function's module.
  Module &M = *C.begin()->getFunction().getParent();
  if (!declaresCoroSplitIntrinsics(M))
    return PreservedAnalyses::all();

  // Check for uses of llvm.coro.prepare.retcon.
  const auto *PrepareFn = M.getFunction("llvm.coro.prepare.retcon");
  if (PrepareFn && PrepareFn->use_empty())
    PrepareFn = nullptr;

  // Find coroutines for processing.
  SmallVector<LazyCallGraph::Node *, 4> Coroutines;
  for (LazyCallGraph::Node &N : C)
    if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
      Coroutines.push_back(&N);

  if (Coroutines.empty() && !PrepareFn)
    return PreservedAnalyses::all();

  if (Coroutines.empty())
    llvm_unreachable("new pass manager cannot yet handle "
                     "'llvm.coro.prepare.retcon'");

  // Split all the coroutines.
  for (LazyCallGraph::Node *N : Coroutines) {
    Function &F = N->getFunction();
    Attribute Attr = F.getFnAttribute(CORO_PRESPLIT_ATTR);
    StringRef Value = Attr.getValueAsString();
    LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
                      << "' state: " << Value << "\n");
    if (Value == UNPREPARED_FOR_SPLIT) {
      // Enqueue a second iteration of the CGSCC pipeline.
      // N.B.:
      // The CoroSplitLegacy pass "triggers" a restart of the CGSCC pass
      // pipeline by inserting an indirect function call that the
      // CoroElideLegacy pass then replaces with a direct function call. The
      // legacy CGSCC pipeline's implicit behavior was as if wrapped in the new
      // pass manager abstraction DevirtSCCRepeatedPass.
      //
      // This pass does not need to "trigger" another run of the pipeline.
      // Instead, it simply enqueues the same SCC onto the pipeline's
      // worklist.
      UR.CWorklist.insert(&C);
      F.addFnAttr(CORO_PRESPLIT_ATTR, PREPARED_FOR_SPLIT);
      continue;
    }
    F.removeFnAttr(CORO_PRESPLIT_ATTR);

    SmallVector<Function *, 4> Clones;
    const coro::Shape Shape = splitCoroutine(F, Clones);
    updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR);
  }

  if (PrepareFn)
    llvm_unreachable("new pass manager cannot yet handle "
                     "'llvm.coro.prepare.retcon'");

  return PreservedAnalyses::none();
}

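The DevirtSCCRepeatedPass abstraction mentioned in the comment above can be
made concrete with a short sketch (helper names from
llvm/Analysis/CGSCCPassManager.h; the iteration limit is illustrative, not
taken from this patch):

    CGSCCPassManager CGPM;
    CGPM.addPass(CoroSplitPass());
    // Wrapping a CGSCC pipeline this way re-runs it when indirect calls are
    // devirtualized, which is the restart behavior the legacy pass manager
    // provided implicitly; the new-PM port instead re-enqueues the SCC
    // directly via UR.CWorklist.insert(&C).
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
        createDevirtSCCRepeatedPass(std::move(CGPM), /*MaxIterations=*/4)));
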
namespace {

// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the coroutine
// as a single function for as long as possible. Shortly before the coroutine is
// eligible to be inlined into its callers, we split up the coroutine into parts
// corresponding to initial, resume and destroy invocations of the coroutine,
// add them to the current SCC and restart the IPO pipeline to optimize the
// coroutine subfunctions we extracted before proceeding to the caller of the
// coroutine.
struct CoroSplitLegacy : public CallGraphSCCPass {
  static char ID; // Pass identification, replacement for typeid
@@ -1539,9 +1657,7 @@ struct CoroSplitLegacy : public CallGraphSCCPass {
  // A coroutine is identified by the presence of the coro.begin intrinsic;
  // if we don't have any, this pass has nothing to do.
  bool doInitialization(CallGraph &CG) override {
    Run = coro::declaresIntrinsics(CG.getModule(),
                                   {"llvm.coro.begin",
                                    "llvm.coro.prepare.retcon"});
    Run = declaresCoroSplitIntrinsics(CG.getModule());
    return CallGraphSCCPass::doInitialization(CG);
  }
@@ -1583,7 +1699,10 @@ struct CoroSplitLegacy : public CallGraphSCCPass {
        continue;
      }
      F->removeFnAttr(CORO_PRESPLIT_ATTR);
      splitCoroutine(*F, CG, SCC);

      SmallVector<Function *, 4> Clones;
      const coro::Shape Shape = splitCoroutine(*F, Clones);
      updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC);
    }

    if (PrepareFn)
@@ -1,29 +1,7 @@
; Check that we can handle the case when both alloc function and
; the user body consume the same argument.
; RUN: opt < %s -coro-split -S | FileCheck %s

; using this directly (as it would happen under -O2)
define i8* @f_direct(i64 %this) "coroutine.presplit"="1" {
entry:
  %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
  %size = call i32 @llvm.coro.size.i32()
  %alloc = call i8* @myAlloc(i64 %this, i32 %size)
  %hdl = call i8* @llvm.coro.begin(token %id, i8* %alloc)
  %0 = call i8 @llvm.coro.suspend(token none, i1 false)
  switch i8 %0, label %suspend [i8 0, label %resume
                                i8 1, label %cleanup]
resume:
  call void @print2(i64 %this)
  br label %cleanup

cleanup:
  %mem = call i8* @llvm.coro.free(token %id, i8* %hdl)
  call void @free(i8* %mem)
  br label %suspend
suspend:
  call i1 @llvm.coro.end(i8* %hdl, i1 0)
  ret i8* %hdl
}
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

; using copy of this (as it would happen under -O0)
define i8* @f_copy(i64 %this_arg) "coroutine.presplit"="1" {
@@ -52,27 +30,14 @@ suspend:
}

; See if %this was added to the frame
; CHECK: %f_direct.Frame = type { void (%f_direct.Frame*)*, void (%f_direct.Frame*)*, i1, i1, i64 }
; CHECK: %f_copy.Frame = type { void (%f_copy.Frame*)*, void (%f_copy.Frame*)*, i1, i1, i64 }

; See that %this is spilled into the frame
; CHECK-LABEL: define i8* @f_direct(i64 %this)
; CHECK: %this.spill.addr = getelementptr inbounds %f_direct.Frame, %f_direct.Frame* %FramePtr, i32 0, i32 4
; CHECK: store i64 %this, i64* %this.spill.addr
; CHECK: ret i8* %hdl

; See that %this is spilled into the frame
; CHECK-LABEL: define i8* @f_copy(i64 %this_arg)
; CHECK: %this.spill.addr = getelementptr inbounds %f_copy.Frame, %f_copy.Frame* %FramePtr, i32 0, i32 4
; CHECK: store i64 %this_arg, i64* %this.spill.addr
; CHECK: ret i8* %hdl

; See that %this was loaded from the frame
; CHECK-LABEL: @f_direct.resume(
; CHECK: %this.reload = load i64, i64* %this.reload.addr
; CHECK: call void @print2(i64 %this.reload)
; CHECK: ret void

; See that %this was loaded from the frame
; CHECK-LABEL: @f_copy.resume(
; CHECK: %this.reload = load i64, i64* %this.reload.addr
@@ -0,0 +1,58 @@
; Check that we can handle the case when both alloc function and
; the user body consume the same argument.
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

; using this directly (as it would happen under -O2)
define i8* @f_direct(i64 %this) "coroutine.presplit"="1" {
entry:
  %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
  %size = call i32 @llvm.coro.size.i32()
  %alloc = call i8* @myAlloc(i64 %this, i32 %size)
  %hdl = call i8* @llvm.coro.begin(token %id, i8* %alloc)
  %0 = call i8 @llvm.coro.suspend(token none, i1 false)
  switch i8 %0, label %suspend [i8 0, label %resume
                                i8 1, label %cleanup]
resume:
  call void @print2(i64 %this)
  br label %cleanup

cleanup:
  %mem = call i8* @llvm.coro.free(token %id, i8* %hdl)
  call void @free(i8* %mem)
  br label %suspend
suspend:
  call i1 @llvm.coro.end(i8* %hdl, i1 0)
  ret i8* %hdl
}

; See if %this was added to the frame
; CHECK: %f_direct.Frame = type { void (%f_direct.Frame*)*, void (%f_direct.Frame*)*, i1, i1, i64 }

; See that %this is spilled into the frame
; CHECK-LABEL: define i8* @f_direct(i64 %this)
; CHECK: %this.spill.addr = getelementptr inbounds %f_direct.Frame, %f_direct.Frame* %FramePtr, i32 0, i32 4
; CHECK: store i64 %this, i64* %this.spill.addr
; CHECK: ret i8* %hdl

; See that %this was loaded from the frame
; CHECK-LABEL: @f_direct.resume(
; CHECK: %this.reload = load i64, i64* %this.reload.addr
; CHECK: call void @print2(i64 %this.reload)
; CHECK: ret void

declare i8* @llvm.coro.free(token, i8*)
declare i32 @llvm.coro.size.i32()
declare i8 @llvm.coro.suspend(token, i1)
declare void @llvm.coro.resume(i8*)
declare void @llvm.coro.destroy(i8*)

declare token @llvm.coro.id(i32, i8*, i8*, i8*)
declare i1 @llvm.coro.alloc(token)
declare i8* @llvm.coro.begin(token, i8*)
declare i1 @llvm.coro.end(i8*, i1)

declare noalias i8* @myAlloc(i64, i32)
declare double @print(double)
declare void @print2(i64)
declare void @free(i8*)
@@ -1,5 +1,6 @@
; Verifies that we can insert the spill for a PHI preceding the catchswitch
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
target triple = "i686-pc-windows-msvc"
@@ -1,5 +1,6 @@
; Tests that debug information is sane after coro-split
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

source_filename = "simple-repro.c"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -0,0 +1,98 @@
; Check that we can handle edge splits leading into a landingpad
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; CHECK-LABEL: define internal fastcc void @f.resume(
define void @f(i1 %cond) "coroutine.presplit"="1" personality i32 0 {
entry:
  %id = call token @llvm.coro.id(i32 16, i8* null, i8* null, i8* null)
  %size = tail call i64 @llvm.coro.size.i64()
  %alloc = call i8* @malloc(i64 %size)
  %hdl = call i8* @llvm.coro.begin(token %id, i8* %alloc)
  %sp = call i8 @llvm.coro.suspend(token none, i1 false)
  switch i8 %sp, label %coro.ret [
    i8 0, label %resume
    i8 1, label %cleanup
  ]

resume:
  br i1 %cond, label %invoke1, label %invoke2

invoke1:
  invoke void @may_throw1()
    to label %unreach unwind label %pad.with.phi
invoke2:
  invoke void @may_throw2()
    to label %unreach unwind label %pad.with.phi

; Verify that we cloned landing pad on every edge and inserted a reload of the spilled value

; CHECK: pad.with.phi.from.invoke2:
; CHECK: %0 = landingpad { i8*, i32 }
; CHECK: catch i8* null
; CHECK: br label %pad.with.phi

; CHECK: pad.with.phi.from.invoke1:
; CHECK: %1 = landingpad { i8*, i32 }
; CHECK: catch i8* null
; CHECK: br label %pad.with.phi

; CHECK: pad.with.phi:
; CHECK: %val = phi i32 [ 0, %pad.with.phi.from.invoke1 ], [ 1, %pad.with.phi.from.invoke2 ]
; CHECK: %lp = phi { i8*, i32 } [ %0, %pad.with.phi.from.invoke2 ], [ %1, %pad.with.phi.from.invoke1 ]
; CHECK: %exn = extractvalue { i8*, i32 } %lp, 0
; CHECK: call i8* @__cxa_begin_catch(i8* %exn)
; CHECK: call void @use_val(i32 %val)
; CHECK: call void @__cxa_end_catch()
; CHECK: call void @free(i8* %vFrame)
; CHECK: ret void

pad.with.phi:
  %val = phi i32 [ 0, %invoke1 ], [ 1, %invoke2 ]
  %lp = landingpad { i8*, i32 }
          catch i8* null
  %exn = extractvalue { i8*, i32 } %lp, 0
  call i8* @__cxa_begin_catch(i8* %exn)
  call void @use_val(i32 %val)
  call void @__cxa_end_catch()
  br label %cleanup

cleanup: ; preds = %invoke.cont15, %if.else, %if.then, %ehcleanup21, %init.suspend
  %mem = call i8* @llvm.coro.free(token %id, i8* %hdl)
  call void @free(i8* %mem)
  br label %coro.ret

coro.ret:
  call i1 @llvm.coro.end(i8* null, i1 false)
  ret void

unreach:
  unreachable
}

; Function Attrs: argmemonly nounwind readonly
declare token @llvm.coro.id(i32, i8* readnone, i8* nocapture readonly, i8*)
declare noalias i8* @malloc(i64)
declare i64 @llvm.coro.size.i64()
declare i8* @llvm.coro.begin(token, i8* writeonly)

; Function Attrs: nounwind
declare token @llvm.coro.save(i8*)
declare i8 @llvm.coro.suspend(token, i1)

; Function Attrs: argmemonly nounwind
declare void @may_throw1()
declare void @may_throw2()

declare i8* @__cxa_begin_catch(i8*)

declare void @use_val(i32)
declare void @__cxa_end_catch()

; Function Attrs: nounwind
declare i1 @llvm.coro.end(i8*, i1)
declare void @free(i8*)
declare i8* @llvm.coro.free(token, i8* nocapture readonly)
@@ -0,0 +1,92 @@
; Check that we can handle edge splits leading into a landingpad
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; CHECK-LABEL: define internal fastcc void @g.resume(
define void @g(i1 %cond, i32 %x, i32 %y) "coroutine.presplit"="1" personality i32 0 {
entry:
  %id = call token @llvm.coro.id(i32 16, i8* null, i8* null, i8* null)
  %size = tail call i64 @llvm.coro.size.i64()
  %alloc = call i8* @malloc(i64 %size)
  %hdl = call i8* @llvm.coro.begin(token %id, i8* %alloc)
  %sp = call i8 @llvm.coro.suspend(token none, i1 false)
  switch i8 %sp, label %coro.ret [
    i8 0, label %resume
    i8 1, label %cleanup
  ]

resume:
  br i1 %cond, label %invoke1, label %invoke2

invoke1:
  invoke void @may_throw1()
    to label %unreach unwind label %pad.with.phi
invoke2:
  invoke void @may_throw2()
    to label %unreach unwind label %pad.with.phi

; Verify that we created cleanuppads on every edge and inserted a reload of the spilled value

; CHECK: pad.with.phi.from.invoke2:
; CHECK: %0 = cleanuppad within none []
; CHECK: %y.reload.addr = getelementptr inbounds %g.Frame, %g.Frame* %FramePtr, i32 0, i32 6
; CHECK: %y.reload = load i32, i32* %y.reload.addr
; CHECK: cleanupret from %0 unwind label %pad.with.phi

; CHECK: pad.with.phi.from.invoke1:
; CHECK: %1 = cleanuppad within none []
; CHECK: %x.reload.addr = getelementptr inbounds %g.Frame, %g.Frame* %FramePtr, i32 0, i32 5
; CHECK: %x.reload = load i32, i32* %x.reload.addr
; CHECK: cleanupret from %1 unwind label %pad.with.phi

; CHECK: pad.with.phi:
; CHECK: %val = phi i32 [ %x.reload, %pad.with.phi.from.invoke1 ], [ %y.reload, %pad.with.phi.from.invoke2 ]
; CHECK: %tok = cleanuppad within none []
; CHECK: call void @use_val(i32 %val)
; CHECK: cleanupret from %tok unwind to caller

pad.with.phi:
  %val = phi i32 [ %x, %invoke1 ], [ %y, %invoke2 ]
  %tok = cleanuppad within none []
  call void @use_val(i32 %val)
  cleanupret from %tok unwind to caller

cleanup: ; preds = %invoke.cont15, %if.else, %if.then, %ehcleanup21, %init.suspend
  %mem = call i8* @llvm.coro.free(token %id, i8* %hdl)
  call void @free(i8* %mem)
  br label %coro.ret

coro.ret:
  call i1 @llvm.coro.end(i8* null, i1 false)
  ret void

unreach:
  unreachable
}

; Function Attrs: argmemonly nounwind readonly
declare token @llvm.coro.id(i32, i8* readnone, i8* nocapture readonly, i8*)
declare noalias i8* @malloc(i64)
declare i64 @llvm.coro.size.i64()
declare i8* @llvm.coro.begin(token, i8* writeonly)

; Function Attrs: nounwind
declare token @llvm.coro.save(i8*)
declare i8 @llvm.coro.suspend(token, i1)

; Function Attrs: argmemonly nounwind
declare void @may_throw1()
declare void @may_throw2()

declare i8* @__cxa_begin_catch(i8*)

declare void @use_val(i32)
declare void @__cxa_end_catch()

; Function Attrs: nounwind
declare i1 @llvm.coro.end(i8*, i1)
declare void @free(i8*)
declare i8* @llvm.coro.free(token, i8* nocapture readonly)
@@ -0,0 +1,89 @@
; Check that we can handle edge splits leading into a landingpad
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; CHECK-LABEL: define internal fastcc void @h.resume(
define void @h(i1 %cond, i32 %x, i32 %y) "coroutine.presplit"="1" personality i32 0 {
entry:
  %id = call token @llvm.coro.id(i32 16, i8* null, i8* null, i8* null)
  %size = tail call i64 @llvm.coro.size.i64()
  %alloc = call i8* @malloc(i64 %size)
  %hdl = call i8* @llvm.coro.begin(token %id, i8* %alloc)
  %sp = call i8 @llvm.coro.suspend(token none, i1 false)
  switch i8 %sp, label %coro.ret [
    i8 0, label %resume
    i8 1, label %cleanup
  ]

resume:
  br i1 %cond, label %invoke1, label %invoke2

invoke1:
  invoke void @may_throw1()
    to label %coro.ret unwind label %pad.with.phi
invoke2:
  invoke void @may_throw2()
    to label %coro.ret unwind label %pad.with.phi

; Verify that we created cleanuppads on every edge and inserted a reload of the spilled value

; CHECK: pad.with.phi.from.invoke2:
; CHECK: %0 = cleanuppad within none []
; CHECK: %y.reload.addr = getelementptr inbounds %h.Frame, %h.Frame* %FramePtr, i32 0, i32 6
; CHECK: %y.reload = load i32, i32* %y.reload.addr
; CHECK: cleanupret from %0 unwind label %pad.with.phi

; CHECK: pad.with.phi.from.invoke1:
; CHECK: %1 = cleanuppad within none []
; CHECK: %x.reload.addr = getelementptr inbounds %h.Frame, %h.Frame* %FramePtr, i32 0, i32 5
; CHECK: %x.reload = load i32, i32* %x.reload.addr
; CHECK: cleanupret from %1 unwind label %pad.with.phi

; CHECK: pad.with.phi:
; CHECK: %val = phi i32 [ %x.reload, %pad.with.phi.from.invoke1 ], [ %y.reload, %pad.with.phi.from.invoke2 ]
; CHECK: %switch = catchswitch within none [label %catch] unwind to caller
pad.with.phi:
  %val = phi i32 [ %x, %invoke1 ], [ %y, %invoke2 ]
  %switch = catchswitch within none [label %catch] unwind to caller

catch: ; preds = %catch.dispatch
  %pad = catchpad within %switch [i8* null, i32 64, i8* null]
  call void @use_val(i32 %val)
  catchret from %pad to label %coro.ret

cleanup: ; preds = %invoke.cont15, %if.else, %if.then, %ehcleanup21, %init.suspend
  %mem = call i8* @llvm.coro.free(token %id, i8* %hdl)
  call void @free(i8* %mem)
  br label %coro.ret

coro.ret:
  call i1 @llvm.coro.end(i8* null, i1 false)
  ret void
}

; Function Attrs: argmemonly nounwind readonly
declare token @llvm.coro.id(i32, i8* readnone, i8* nocapture readonly, i8*)
declare noalias i8* @malloc(i64)
declare i64 @llvm.coro.size.i64()
declare i8* @llvm.coro.begin(token, i8* writeonly)

; Function Attrs: nounwind
declare token @llvm.coro.save(i8*)
declare i8 @llvm.coro.suspend(token, i1)

; Function Attrs: argmemonly nounwind
declare void @may_throw1()
declare void @may_throw2()

declare i8* @__cxa_begin_catch(i8*)

declare void @use_val(i32)
declare void @__cxa_end_catch()

; Function Attrs: nounwind
declare i1 @llvm.coro.end(i8*, i1)
declare void @free(i8*)
declare i8* @llvm.coro.free(token, i8* nocapture readonly)
@@ -1,218 +0,0 @@
; Check that we can handle edge splits leading into a landingpad
; RUN: opt < %s -coro-split -S | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; CHECK-LABEL: define internal fastcc void @f.resume(
define void @f(i1 %cond) "coroutine.presplit"="1" personality i32 0 {
entry:
  %id = call token @llvm.coro.id(i32 16, i8* null, i8* null, i8* null)
  %size = tail call i64 @llvm.coro.size.i64()
  %alloc = call i8* @malloc(i64 %size)
  %hdl = call i8* @llvm.coro.begin(token %id, i8* %alloc)
  %sp = call i8 @llvm.coro.suspend(token none, i1 false)
  switch i8 %sp, label %coro.ret [
    i8 0, label %resume
    i8 1, label %cleanup
  ]

resume:
  br i1 %cond, label %invoke1, label %invoke2

invoke1:
  invoke void @may_throw1()
    to label %unreach unwind label %pad.with.phi
invoke2:
  invoke void @may_throw2()
    to label %unreach unwind label %pad.with.phi

; Verify that we cloned landing pad on every edge and inserted a reload of the spilled value

; CHECK: pad.with.phi.from.invoke2:
; CHECK: %0 = landingpad { i8*, i32 }
; CHECK: catch i8* null
; CHECK: br label %pad.with.phi

; CHECK: pad.with.phi.from.invoke1:
; CHECK: %1 = landingpad { i8*, i32 }
; CHECK: catch i8* null
; CHECK: br label %pad.with.phi

; CHECK: pad.with.phi:
; CHECK: %val = phi i32 [ 0, %pad.with.phi.from.invoke1 ], [ 1, %pad.with.phi.from.invoke2 ]
; CHECK: %lp = phi { i8*, i32 } [ %0, %pad.with.phi.from.invoke2 ], [ %1, %pad.with.phi.from.invoke1 ]
; CHECK: %exn = extractvalue { i8*, i32 } %lp, 0
; CHECK: call i8* @__cxa_begin_catch(i8* %exn)
; CHECK: call void @use_val(i32 %val)
; CHECK: call void @__cxa_end_catch()
; CHECK: call void @free(i8* %vFrame)
; CHECK: ret void

pad.with.phi:
  %val = phi i32 [ 0, %invoke1 ], [ 1, %invoke2 ]
  %lp = landingpad { i8*, i32 }
          catch i8* null
  %exn = extractvalue { i8*, i32 } %lp, 0
  call i8* @__cxa_begin_catch(i8* %exn)
  call void @use_val(i32 %val)
  call void @__cxa_end_catch()
  br label %cleanup

cleanup: ; preds = %invoke.cont15, %if.else, %if.then, %ehcleanup21, %init.suspend
  %mem = call i8* @llvm.coro.free(token %id, i8* %hdl)
  call void @free(i8* %mem)
  br label %coro.ret

coro.ret:
  call i1 @llvm.coro.end(i8* null, i1 false)
  ret void

unreach:
  unreachable
}

; CHECK-LABEL: define internal fastcc void @g.resume(
define void @g(i1 %cond, i32 %x, i32 %y) "coroutine.presplit"="1" personality i32 0 {
entry:
  %id = call token @llvm.coro.id(i32 16, i8* null, i8* null, i8* null)
  %size = tail call i64 @llvm.coro.size.i64()
  %alloc = call i8* @malloc(i64 %size)
  %hdl = call i8* @llvm.coro.begin(token %id, i8* %alloc)
  %sp = call i8 @llvm.coro.suspend(token none, i1 false)
  switch i8 %sp, label %coro.ret [
    i8 0, label %resume
    i8 1, label %cleanup
  ]

resume:
  br i1 %cond, label %invoke1, label %invoke2

invoke1:
  invoke void @may_throw1()
    to label %unreach unwind label %pad.with.phi
invoke2:
  invoke void @may_throw2()
    to label %unreach unwind label %pad.with.phi

; Verify that we created cleanuppads on every edge and inserted a reload of the spilled value

; CHECK: pad.with.phi.from.invoke2:
; CHECK: %0 = cleanuppad within none []
; CHECK: %y.reload.addr = getelementptr inbounds %g.Frame, %g.Frame* %FramePtr, i32 0, i32 6
; CHECK: %y.reload = load i32, i32* %y.reload.addr
; CHECK: cleanupret from %0 unwind label %pad.with.phi

; CHECK: pad.with.phi.from.invoke1:
; CHECK: %1 = cleanuppad within none []
; CHECK: %x.reload.addr = getelementptr inbounds %g.Frame, %g.Frame* %FramePtr, i32 0, i32 5
; CHECK: %x.reload = load i32, i32* %x.reload.addr
; CHECK: cleanupret from %1 unwind label %pad.with.phi

; CHECK: pad.with.phi:
; CHECK: %val = phi i32 [ %x.reload, %pad.with.phi.from.invoke1 ], [ %y.reload, %pad.with.phi.from.invoke2 ]
; CHECK: %tok = cleanuppad within none []
; CHECK: call void @use_val(i32 %val)
; CHECK: cleanupret from %tok unwind to caller

pad.with.phi:
  %val = phi i32 [ %x, %invoke1 ], [ %y, %invoke2 ]
  %tok = cleanuppad within none []
  call void @use_val(i32 %val)
  cleanupret from %tok unwind to caller

cleanup: ; preds = %invoke.cont15, %if.else, %if.then, %ehcleanup21, %init.suspend
  %mem = call i8* @llvm.coro.free(token %id, i8* %hdl)
  call void @free(i8* %mem)
  br label %coro.ret

coro.ret:
  call i1 @llvm.coro.end(i8* null, i1 false)
  ret void

unreach:
  unreachable
}

; CHECK-LABEL: define internal fastcc void @h.resume(
define void @h(i1 %cond, i32 %x, i32 %y) "coroutine.presplit"="1" personality i32 0 {
entry:
  %id = call token @llvm.coro.id(i32 16, i8* null, i8* null, i8* null)
  %size = tail call i64 @llvm.coro.size.i64()
  %alloc = call i8* @malloc(i64 %size)
  %hdl = call i8* @llvm.coro.begin(token %id, i8* %alloc)
  %sp = call i8 @llvm.coro.suspend(token none, i1 false)
  switch i8 %sp, label %coro.ret [
    i8 0, label %resume
    i8 1, label %cleanup
  ]

resume:
  br i1 %cond, label %invoke1, label %invoke2

invoke1:
  invoke void @may_throw1()
    to label %coro.ret unwind label %pad.with.phi
invoke2:
  invoke void @may_throw2()
    to label %coro.ret unwind label %pad.with.phi

; Verify that we created cleanuppads on every edge and inserted a reload of the spilled value

; CHECK: pad.with.phi.from.invoke2:
; CHECK: %0 = cleanuppad within none []
; CHECK: %y.reload.addr = getelementptr inbounds %h.Frame, %h.Frame* %FramePtr, i32 0, i32 6
; CHECK: %y.reload = load i32, i32* %y.reload.addr
; CHECK: cleanupret from %0 unwind label %pad.with.phi

; CHECK: pad.with.phi.from.invoke1:
; CHECK: %1 = cleanuppad within none []
; CHECK: %x.reload.addr = getelementptr inbounds %h.Frame, %h.Frame* %FramePtr, i32 0, i32 5
; CHECK: %x.reload = load i32, i32* %x.reload.addr
; CHECK: cleanupret from %1 unwind label %pad.with.phi

; CHECK: pad.with.phi:
; CHECK: %val = phi i32 [ %x.reload, %pad.with.phi.from.invoke1 ], [ %y.reload, %pad.with.phi.from.invoke2 ]
; CHECK: %switch = catchswitch within none [label %catch] unwind to caller
pad.with.phi:
  %val = phi i32 [ %x, %invoke1 ], [ %y, %invoke2 ]
  %switch = catchswitch within none [label %catch] unwind to caller

catch: ; preds = %catch.dispatch
  %pad = catchpad within %switch [i8* null, i32 64, i8* null]
  call void @use_val(i32 %val)
  catchret from %pad to label %coro.ret

cleanup: ; preds = %invoke.cont15, %if.else, %if.then, %ehcleanup21, %init.suspend
  %mem = call i8* @llvm.coro.free(token %id, i8* %hdl)
  call void @free(i8* %mem)
  br label %coro.ret

coro.ret:
  call i1 @llvm.coro.end(i8* null, i1 false)
  ret void
}

; Function Attrs: argmemonly nounwind readonly
declare token @llvm.coro.id(i32, i8* readnone, i8* nocapture readonly, i8*)
declare noalias i8* @malloc(i64)
declare i64 @llvm.coro.size.i64()
declare i8* @llvm.coro.begin(token, i8* writeonly)

; Function Attrs: nounwind
declare token @llvm.coro.save(i8*)
declare i8 @llvm.coro.suspend(token, i1)

; Function Attrs: argmemonly nounwind
declare void @may_throw1()
declare void @may_throw2()

declare i8* @__cxa_begin_catch(i8*)

declare void @use_val(i32)
declare void @__cxa_end_catch()

; Function Attrs: nounwind
declare i1 @llvm.coro.end(i8*, i1)
declare void @free(i8*)
declare i8* @llvm.coro.free(token, i8* nocapture readonly)
@@ -1,5 +1,6 @@
; Check that we can handle spills of array allocas
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

declare void @consume.double.ptr(double*)
declare void @consume.i32.ptr(i32*)
@@ -1,5 +1,6 @@
; Check that coro-split doesn't choke on intrinsics in unreachable blocks
; RUN: opt < %s -coro-split -S
; RUN: opt < %s -passes=coro-split -S

define i8* @f(i1 %arg) "coroutine.presplit"="1" personality i32 0 {
entry:
@@ -1,5 +1,6 @@
; Check that we can handle spills of the result of the invoke instruction
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

define i8* @f(i64 %this) "coroutine.presplit"="1" personality i32 0 {
entry:
@@ -1,5 +1,6 @@
; Verifies that we materialize instructions across suspend points
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

define i8* @f(i32 %n) "coroutine.presplit"="1" {
entry:
@@ -1,6 +1,7 @@
; Check that we will insert the correct padding if natural alignment of the
; spilled data does not match the alignment specified in alloca instruction.
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

%PackedStruct = type <{ i64 }>

@@ -1,6 +1,7 @@
; Check that we create a copy of the data from the alloca into the coroutine
; frame slot if it was written to.
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

define i8* @f() "coroutine.presplit"="1" {
entry:
@@ -1,5 +1,6 @@
; Verifies that we insert spills of a PHI instruction _after_ all PHI nodes
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

define i8* @f(i1 %n) "coroutine.presplit"="1" {
entry:
@@ -1,5 +1,6 @@
; Check that we can spill coro.begin from an inlined inner coroutine.
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

%g.Frame = type { void (%g.Frame*)*, void (%g.Frame*)*, i32, i1, i32 }

@@ -1,5 +1,6 @@
; Tests that coro-split pass splits the coroutine into f, f.resume and f.destroy
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

define i8* @f() "coroutine.presplit"="1" {
entry:
@@ -2,6 +2,7 @@
; a value produced between coro.save and coro.suspend (%Result.i19)
; and checks whether stray coro.saves are properly removed
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

%"struct.std::coroutine_handle" = type { i8* }
%"struct.std::coroutine_handle.0" = type { %"struct.std::coroutine_handle" }
@@ -1,5 +1,6 @@
; Tests that coro-split passes initialized values to coroutine frame allocator.
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

define i8* @f(i32 %argument) "coroutine.presplit"="1" {
entry:
@@ -1,6 +1,7 @@
; Make sure that coro-split correctly deals with debug information.
; The test here is simply that it does not result in bad IR that will crash opt.
; RUN: opt < %s -coro-split -disable-output
; RUN: opt < %s -passes=coro-split -disable-output
source_filename = "coro.c"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
@@ -1,6 +1,7 @@
; Tests that coro-split removes cleanup code after coro.end in resume functions
; and retains it in the start function.
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

define i8* @f(i1 %val) "coroutine.presplit"="1" personality i32 3 {
entry:
@@ -53,52 +54,6 @@ eh.resume:
; CHECK-NEXT: call void @print(i32 3)
; CHECK-NEXT: resume { i8*, i32 } %lpval

define i8* @f2(i1 %val) "coroutine.presplit"="1" personality i32 4 {
entry:
  %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
  %hdl = call i8* @llvm.coro.begin(token %id, i8* null)
  call void @print(i32 0)
  br i1 %val, label %resume, label %susp

susp:
  %0 = call i8 @llvm.coro.suspend(token none, i1 false)
  switch i8 %0, label %suspend [i8 0, label %resume
                                i8 1, label %suspend]
resume:
  invoke void @print(i32 1) to label %suspend unwind label %lpad

suspend:
  call i1 @llvm.coro.end(i8* %hdl, i1 0)
  call void @print(i32 0) ; should not be present in f.resume
  ret i8* %hdl

lpad:
  %tok = cleanuppad within none []
  call void @print(i32 2)
  %unused = call i1 @llvm.coro.end(i8* null, i1 true) [ "funclet"(token %tok) ]
  cleanupret from %tok unwind label %cleanup.cont

cleanup.cont:
  %tok2 = cleanuppad within none []
  call void @print(i32 3) ; should not be present in f.resume
  cleanupret from %tok2 unwind to caller
}

; Verify that the start function contains both print calls, the one before and the one after coro.end
; CHECK-LABEL: define i8* @f2(
; CHECK: invoke void @print(i32 1)
; CHECK: to label %AfterCoroEnd unwind label %lpad

; CHECK: AfterCoroEnd:
; CHECK: call void @print(i32 0)
; CHECK: ret i8* %hdl

; CHECK: lpad:
; CHECK-NEXT: %tok = cleanuppad within none []
; CHECK-NEXT: call void @print(i32 2)
; CHECK-NEXT: call void @print(i32 3)
; CHECK-NEXT: cleanupret from %tok unwind to caller

; VERIFY Resume Parts

; Verify that the resume function does not contain the print calls appearing after coro.end
@@ -115,19 +70,6 @@ cleanup.cont:
; CHECK-NEXT: call void @print(i32 2)
; CHECK-NEXT: resume { i8*, i32 } %lpval

; Verify that the resume function does not contain the print calls appearing after coro.end
; CHECK-LABEL: define internal fastcc void @f2.resume
; CHECK: invoke void @print(i32 1)
; CHECK: to label %CoroEnd unwind label %lpad

; CHECK: CoroEnd:
; CHECK-NEXT: ret void

; CHECK: lpad:
; CHECK-NEXT: %tok = cleanuppad within none []
; CHECK-NEXT: call void @print(i32 2)
; CHECK-NEXT: cleanupret from %tok unwind to caller

declare i8* @llvm.coro.free(token, i8*)
declare i32 @llvm.coro.size.i32()
declare i8 @llvm.coro.suspend(token, i1)
@@ -0,0 +1,81 @@
; Tests that coro-split removes cleanup code after coro.end in resume functions
; and retains it in the start function.
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

define i8* @f2(i1 %val) "coroutine.presplit"="1" personality i32 4 {
entry:
  %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
  %hdl = call i8* @llvm.coro.begin(token %id, i8* null)
  call void @print(i32 0)
  br i1 %val, label %resume, label %susp

susp:
  %0 = call i8 @llvm.coro.suspend(token none, i1 false)
  switch i8 %0, label %suspend [i8 0, label %resume
                                i8 1, label %suspend]
resume:
  invoke void @print(i32 1) to label %suspend unwind label %lpad

suspend:
  call i1 @llvm.coro.end(i8* %hdl, i1 0)
  call void @print(i32 0) ; should not be present in f.resume
  ret i8* %hdl

lpad:
  %tok = cleanuppad within none []
  call void @print(i32 2)
  %unused = call i1 @llvm.coro.end(i8* null, i1 true) [ "funclet"(token %tok) ]
  cleanupret from %tok unwind label %cleanup.cont

cleanup.cont:
  %tok2 = cleanuppad within none []
  call void @print(i32 3) ; should not be present in f.resume
  cleanupret from %tok2 unwind to caller
}

; Verify that the start function contains both print calls, the one before and the one after coro.end
; CHECK-LABEL: define i8* @f2(
; CHECK: invoke void @print(i32 1)
; CHECK: to label %AfterCoroEnd unwind label %lpad

; CHECK: AfterCoroEnd:
; CHECK: call void @print(i32 0)
; CHECK: ret i8* %hdl

; CHECK: lpad:
; CHECK-NEXT: %tok = cleanuppad within none []
; CHECK-NEXT: call void @print(i32 2)
; CHECK-NEXT: call void @print(i32 3)
; CHECK-NEXT: cleanupret from %tok unwind to caller

; VERIFY Resume Parts

; Verify that the resume function does not contain the print calls appearing after coro.end
; CHECK-LABEL: define internal fastcc void @f2.resume
; CHECK: invoke void @print(i32 1)
; CHECK: to label %CoroEnd unwind label %lpad

; CHECK: CoroEnd:
; CHECK-NEXT: ret void

; CHECK: lpad:
; CHECK-NEXT: %tok = cleanuppad within none []
; CHECK-NEXT: call void @print(i32 2)
; CHECK-NEXT: cleanupret from %tok unwind to caller

declare i8* @llvm.coro.free(token, i8*)
declare i32 @llvm.coro.size.i32()
declare i8 @llvm.coro.suspend(token, i1)
declare void @llvm.coro.resume(i8*)
declare void @llvm.coro.destroy(i8*)

declare token @llvm.coro.id(i32, i8*, i8*, i8*)
declare i8* @llvm.coro.alloc(token)
declare i8* @llvm.coro.begin(token, i8*)
declare i1 @llvm.coro.end(i8*, i1)

declare noalias i8* @malloc(i32)
declare void @print(i32)
declare void @free(i8*)
@@ -2,6 +2,7 @@
; These may be generated by a frontend such as Clang, when inlining with
; '-fvisibility-inlines-hidden'.
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

define hidden i8* @f() "coroutine.presplit"="1" {
entry:
@@ -1,6 +1,7 @@
; Tests that coro-split will convert coro.resume followed by a suspend to a
; musttail call.
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

define void @f() #0 {
entry:
@@ -1,6 +1,7 @@
; Tests that coro-split will convert coro.resume followed by a suspend to a
; musttail call.
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

define void @f() #0 {
entry:
@@ -1,5 +1,6 @@
; Test no suspend coroutines
; RUN: opt < %s -coro-split -S | FileCheck %s
; RUN: opt < %s -passes=coro-split -S | FileCheck %s

; Coroutine with no-suspends will turn into:
;
@@ -1,5 +1,6 @@
; Verifies that restart trigger forces IPO pipelines restart and the same
; coroutine is looked at by CoroSplit pass twice.
; Verifies that the restart trigger that is used by legacy coroutine passes
; forces the legacy pass manager to restart IPO pipelines, thereby causing the
; same coroutine to be looked at by CoroSplit pass twice.
; REQUIRES: asserts
; RUN: opt < %s -S -O0 -enable-coroutines -debug-only=coro-split 2>&1 | FileCheck %s
; RUN: opt < %s -S -O1 -enable-coroutines -debug-only=coro-split 2>&1 | FileCheck %s