Convert the exception-freeing cleanup over to the conditional cleanups code,
fixing a crash which probably nobody was ever going to see.  In doing so,
fix a horrendous number of problems with the conditional-cleanups code.
Also, make conditional cleanups re-use the cleanup's activation variable,
which avoids some unfortunate repetitiveness.

llvm-svn: 124481
John McCall 2011-01-28 08:37:24 +00:00
parent aaf401241a
commit e4df6c8d96
5 changed files with 142 additions and 151 deletions
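
Before the diff itself, here is a minimal standalone sketch of the activation-flag pattern the conditional-cleanups code uses after this change. It is plain C++, not the clang CodeGen API: ExceptionSlot, allocate(), and disarm() are invented names for illustration only. The point is simply that the flag starts out false before the conditionally-evaluated code, flips to true once the guarded object actually exists, and is cleared again once ownership is handed off, so the cleanup only does work when it should.

#include <cstdio>
#include <new>

// Illustrative stand-ins only; these are not clang's CodeGen classes.
struct ExceptionSlot {
  void *storage;
  bool active;                        // the cleanup's activation variable

  ExceptionSlot() : storage(0), active(false) {}  // false before the conditional

  void *allocate() {
    storage = ::operator new(16);     // stand-in for __cxa_allocate_exception
    active = true;                    // arm the cleanup once the object exists
    return storage;
  }

  void disarm() { active = false; }   // e.g. once __cxa_throw takes ownership

  ~ExceptionSlot() {                  // the conditional cleanup itself
    if (active) {                     // test the activation flag first
      std::puts("freeing abandoned exception object");
      ::operator delete(storage);     // stand-in for __cxa_free_exception
    }
  }
};

struct InitFailed {};

static bool cond() { return true; }

int main() {
  ExceptionSlot slot;
  try {
    if (cond()) {
      slot.allocate();                // only this branch arms the cleanup
      throw InitFailed();             // "initializer threw": unwinding frees it
    }
    // otherwise the flag stays false and the destructor does nothing
  } catch (const InitFailed &) {
    std::puts("caught");
  }
  return 0;
}

In the patch, the same roles are played by the i1 alloca named cleanup.cond in initFullExprCleanup() and by the DeactivateCleanupBlock() call once the exception object has been handed to __cxa_throw.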

View File

@@ -697,6 +697,7 @@ void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D,
DidCallStackSave = true;
// Push a cleanup block and restore the stack there.
// FIXME: in general circumstances, this should be an EH cleanup.
EHStack.pushCleanup<CallStackRestore>(NormalCleanup, Stack);
}

View File

@@ -172,19 +172,26 @@ void EHScopeStack::popNullFixups() {
BranchFixups.pop_back();
}
llvm::Value *CodeGenFunction::initFullExprCleanup() {
void CodeGenFunction::initFullExprCleanup() {
// Create a variable to decide whether the cleanup needs to be run.
llvm::AllocaInst *run = CreateTempAlloca(Builder.getInt1Ty(), "cleanup.cond");
llvm::AllocaInst *active
= CreateTempAlloca(Builder.getInt1Ty(), "cleanup.cond");
// Initialize it to false at a site that's guaranteed to be run
// before each evaluation.
llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
new llvm::StoreInst(Builder.getFalse(), run, &block->back());
new llvm::StoreInst(Builder.getFalse(), active, &block->back());
// Initialize it to true at the current location.
Builder.CreateStore(Builder.getTrue(), run);
Builder.CreateStore(Builder.getTrue(), active);
return run;
// Set that as the active flag in the cleanup.
EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
assert(cleanup.getActiveFlag() == 0 && "cleanup already has active flag?");
cleanup.setActiveFlag(active);
if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
}
static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
@@ -483,26 +490,11 @@ static llvm::Constant *getCleanupValue(CodeGenFunction &CGF) {
namespace {
/// A cleanup to free the exception object if its initialization
/// throws.
struct FreeExceptionCleanup : EHScopeStack::Cleanup {
FreeExceptionCleanup(llvm::Value *ShouldFreeVar,
llvm::Value *ExnLocVar)
: ShouldFreeVar(ShouldFreeVar), ExnLocVar(ExnLocVar) {}
llvm::Value *ShouldFreeVar;
llvm::Value *ExnLocVar;
void Emit(CodeGenFunction &CGF, bool IsForEH) {
llvm::BasicBlock *FreeBB = CGF.createBasicBlock("free-exnobj");
llvm::BasicBlock *DoneBB = CGF.createBasicBlock("free-exnobj.done");
llvm::Value *ShouldFree = CGF.Builder.CreateLoad(ShouldFreeVar,
"should-free-exnobj");
CGF.Builder.CreateCondBr(ShouldFree, FreeBB, DoneBB);
CGF.EmitBlock(FreeBB);
llvm::Value *ExnLocLocal = CGF.Builder.CreateLoad(ExnLocVar, "exnobj");
CGF.Builder.CreateCall(getFreeExceptionFn(CGF), ExnLocLocal)
struct FreeException {
static void Emit(CodeGenFunction &CGF, bool forEH,
llvm::Value *exn) {
CGF.Builder.CreateCall(getFreeExceptionFn(CGF), exn)
->setDoesNotThrow();
CGF.EmitBlock(DoneBB);
}
};
}
@@ -511,41 +503,17 @@ namespace {
// differs from EmitAnyExprToMem only in that, if a final copy-ctor
// call is required, an exception within that copy ctor causes
// std::terminate to be invoked.
static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *E,
llvm::Value *ExnLoc) {
// We want to release the allocated exception object if this
// expression throws. We do this by pushing an EH-only cleanup
// block which, furthermore, deactivates itself after the expression
// is complete.
llvm::AllocaInst *ShouldFreeVar =
CGF.CreateTempAlloca(llvm::Type::getInt1Ty(CGF.getLLVMContext()),
"should-free-exnobj.var");
CGF.InitTempAlloca(ShouldFreeVar,
llvm::ConstantInt::getFalse(CGF.getLLVMContext()));
// A variable holding the exception pointer. This is necessary
// because the throw expression does not necessarily dominate the
// cleanup, for example if it appears in a conditional expression.
llvm::AllocaInst *ExnLocVar =
CGF.CreateTempAlloca(ExnLoc->getType(), "exnobj.var");
static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *e,
llvm::Value *addr) {
// Make sure the exception object is cleaned up if there's an
// exception during initialization.
// FIXME: stmt expressions might require this to be a normal
// cleanup, too.
CGF.EHStack.pushCleanup<FreeExceptionCleanup>(EHCleanup,
ShouldFreeVar,
ExnLocVar);
EHScopeStack::stable_iterator Cleanup = CGF.EHStack.stable_begin();
CGF.Builder.CreateStore(ExnLoc, ExnLocVar);
CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(CGF.getLLVMContext()),
ShouldFreeVar);
CGF.pushFullExprCleanup<FreeException>(EHCleanup, addr);
EHScopeStack::stable_iterator cleanup = CGF.EHStack.stable_begin();
// __cxa_allocate_exception returns a void*; we need to cast this
// to the appropriate type for the object.
const llvm::Type *Ty = CGF.ConvertTypeForMem(E->getType())->getPointerTo();
llvm::Value *TypedExnLoc = CGF.Builder.CreateBitCast(ExnLoc, Ty);
const llvm::Type *ty = CGF.ConvertTypeForMem(e->getType())->getPointerTo();
llvm::Value *typedAddr = CGF.Builder.CreateBitCast(addr, ty);
// FIXME: this isn't quite right! If there's a final unelided call
// to a copy constructor, then according to [except.terminate]p1 we
@@ -554,22 +522,10 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *E,
// evaluated but before the exception is caught. But the best way
// to handle that is to teach EmitAggExpr to do the final copy
// differently if it can't be elided.
CGF.EmitAnyExprToMem(E, TypedExnLoc, /*Volatile*/ false, /*IsInit*/ true);
CGF.EmitAnyExprToMem(e, typedAddr, /*Volatile*/ false, /*IsInit*/ true);
CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(CGF.getLLVMContext()),
ShouldFreeVar);
// Technically, the exception object is like a temporary; it has to
// be cleaned up when its full-expression is complete.
// Unfortunately, the AST represents full-expressions by creating a
// ExprWithCleanups, which it only does when there are actually
// temporaries.
//
// If any cleanups have been added since we pushed ours, they must
// be from temporaries; this will get popped at the same time.
// Otherwise we need to pop ours off. FIXME: this is very brittle.
if (Cleanup == CGF.EHStack.stable_begin())
CGF.PopCleanupBlock();
// Deactivate the cleanup block.
CGF.DeactivateCleanupBlock(cleanup);
}
llvm::Value *CodeGenFunction::getExceptionSlot() {
@@ -1671,23 +1627,3 @@ CodeGenFunction::UnwindDest CodeGenFunction::getRethrowDest() {
EHScopeStack::Cleanup::~Cleanup() {
llvm_unreachable("Cleanup is indestructable");
}
void EHScopeStack::ConditionalCleanup::Emit(CodeGenFunction &CGF,
bool IsForEHCleanup) {
// Determine whether we should run the cleanup.
llvm::Value *condVal = CGF.Builder.CreateLoad(cond, "cond.should-run");
llvm::BasicBlock *cleanup = CGF.createBasicBlock("cond-cleanup.run");
llvm::BasicBlock *cont = CGF.createBasicBlock("cond-cleanup.cont");
// If we shouldn't run the cleanup, jump directly to the continuation block.
CGF.Builder.CreateCondBr(condVal, cleanup, cont);
CGF.EmitBlock(cleanup);
// Emit the core of the cleanup.
EmitImpl(CGF, IsForEHCleanup);
assert(CGF.HaveInsertPoint() && "cleanup didn't end with valid IP!");
// Fall into the continuation block.
CGF.EmitBlock(cont);
}

View File

@@ -1197,6 +1197,8 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
}
// Make sure that we call delete even if the dtor throws.
// This doesn't have to be a conditional cleanup because we're going
// to pop it off in a second.
CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
Ptr, OperatorDelete, ElementType);
@@ -1361,7 +1363,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
EmitBlock(DeleteEnd);
}
llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
QualType Ty = E->getType();
const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();

View File

@@ -103,9 +103,12 @@ struct BranchFixup {
template
<class T,
bool mustSave =
llvm::is_base_of<llvm::Value, llvm::remove_pointer<T> >::value
&& !llvm::is_base_of<llvm::Constant, llvm::remove_pointer<T> >::value
&& !llvm::is_base_of<llvm::BasicBlock, llvm::remove_pointer<T> >::value>
llvm::is_base_of<llvm::Value,
typename llvm::remove_pointer<T>::type>::value
&& !llvm::is_base_of<llvm::Constant,
typename llvm::remove_pointer<T>::type>::value
&& !llvm::is_base_of<llvm::BasicBlock,
typename llvm::remove_pointer<T>::type>::value>
struct SavedValueInCond {
typedef T type;
typedef T saved_type;
@@ -193,24 +196,18 @@ public:
virtual void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) = 0;
};
/// A helper class for cleanups that execute conditionally.
class ConditionalCleanup : public Cleanup {
/// Either an i1 which directly indicates whether the cleanup
/// should be run or an i1* from which that should be loaded.
llvm::Value *cond;
public:
virtual void Emit(CodeGenFunction &CGF, bool IsForEHCleanup);
protected:
ConditionalCleanup(llvm::Value *cond) : cond(cond) {}
/// Emit the non-conditional code for the cleanup.
virtual void EmitImpl(CodeGenFunction &CGF, bool IsForEHCleanup) = 0;
};
/// UnconditionalCleanupN stores its N parameters and just passes
/// them to the real cleanup function.
template <class T, class A0>
class UnconditionalCleanup1 : public Cleanup {
A0 a0;
public:
UnconditionalCleanup1(A0 a0) : a0(a0) {}
void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
T::Emit(CGF, IsForEHCleanup, a0);
}
};
template <class T, class A0, class A1>
class UnconditionalCleanup2 : public Cleanup {
A0 a0; A1 a1;
@@ -223,22 +220,37 @@ public:
/// ConditionalCleanupN stores the saved form of its N parameters,
/// then restores them and performs the cleanup.
template <class T, class A0>
class ConditionalCleanup1 : public Cleanup {
typedef typename SavedValueInCond<A0>::saved_type A0_saved;
A0_saved a0_saved;
void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
A0 a0 = SavedValueInCond<A0>::restore(CGF, a0_saved);
T::Emit(CGF, IsForEHCleanup, a0);
}
public:
ConditionalCleanup1(A0_saved a0)
: a0_saved(a0) {}
};
template <class T, class A0, class A1>
class ConditionalCleanup2 : public ConditionalCleanup {
class ConditionalCleanup2 : public Cleanup {
typedef typename SavedValueInCond<A0>::saved_type A0_saved;
typedef typename SavedValueInCond<A1>::saved_type A1_saved;
A0_saved a0_saved;
A1_saved a1_saved;
void EmitImpl(CodeGenFunction &CGF, bool IsForEHCleanup) {
void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
A0 a0 = SavedValueInCond<A0>::restore(CGF, a0_saved);
A1 a1 = SavedValueInCond<A1>::restore(CGF, a1_saved);
T::Emit(CGF, IsForEHCleanup, a0, a1);
}
public:
ConditionalCleanup2(llvm::Value *cond, A0_saved a0, A1_saved a1)
: ConditionalCleanup(cond), a0_saved(a0), a1_saved(a1) {}
ConditionalCleanup2(A0_saved a0, A1_saved a1)
: a0_saved(a0), a1_saved(a1) {}
};
private:
@@ -602,8 +614,9 @@ public:
llvm::BasicBlock *getInvokeDestImpl();
/// Sets up a condition for a full-expression cleanup.
llvm::Value *initFullExprCleanup();
/// Set up the last cleanup that was pushed as a conditional
/// full-expression cleanup.
void initFullExprCleanup();
template <class T>
typename SavedValueInCond<T>::saved_type saveValueInCond(T value) {
@@ -626,6 +639,25 @@ public:
llvm::Constant *RethrowFn);
void ExitFinallyBlock(FinallyInfo &FinallyInfo);
/// pushFullExprCleanup - Push a cleanup to be run at the end of the
/// current full-expression. Safe against the possibility that
/// we're currently inside a conditionally-evaluated expression.
template <class T, class A0>
void pushFullExprCleanup(CleanupKind kind, A0 a0) {
// If we're not in a conditional branch, or if none of the
// arguments requires saving, then use the unconditional cleanup.
if (!isInConditionalBranch()) {
typedef EHScopeStack::UnconditionalCleanup1<T, A0> CleanupType;
return EHStack.pushCleanup<CleanupType>(kind, a0);
}
typename SavedValueInCond<A0>::saved_type a0_saved = saveValueInCond(a0);
typedef EHScopeStack::ConditionalCleanup1<T, A0> CleanupType;
EHStack.pushCleanup<CleanupType>(kind, a0_saved);
initFullExprCleanup();
}
/// pushFullExprCleanup - Push a cleanup to be run at the end of the
/// current full-expression. Safe against the possibility that
/// we're currently inside a conditionally-evaluated expression.
@@ -633,19 +665,17 @@ public:
void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
// If we're not in a conditional branch, or if none of the
// arguments requires saving, then use the unconditional cleanup.
if (!(isInConditionalBranch() ||
SavedValueInCond<A0>::needsSaving(a0) ||
SavedValueInCond<A1>::needsSaving(a1))) {
if (!isInConditionalBranch()) {
typedef EHScopeStack::UnconditionalCleanup2<T, A0, A1> CleanupType;
return EHStack.pushCleanup<CleanupType>(kind, a0, a1);
}
llvm::Value *condVar = initFullExprCleanup();
typename SavedValueInCond<A0>::saved_type a0_saved = saveValueInCond(a0);
typename SavedValueInCond<A1>::saved_type a1_saved = saveValueInCond(a1);
typedef EHScopeStack::ConditionalCleanup2<T, A0, A1> CleanupType;
EHStack.pushCleanup<CleanupType>(kind, condVar, a0_saved, a1_saved);
EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved);
initFullExprCleanup();
}
/// PushDestructorCleanup - Push a cleanup to call the

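As an aside on the SavedValueInCond change above: the old default argument applied llvm::is_base_of to llvm::remove_pointer<T> itself (the trait class) rather than to its nested ::type, so the test was looking at the trait rather than the pointee and (assuming llvm::is_base_of behaves like std::is_base_of) mustSave always came out false. The sketch below reproduces the distinction with the standard-library traits and made-up Value/Constant stand-ins; it is not the LLVM trait code, just an illustration of why the 'typename ...::type' spelling matters.

#include <type_traits>

// Stand-in hierarchy; not the real llvm::Value/llvm::Constant classes.
struct Value {};
struct Constant : Value {};

// Old spelling: the second argument is the trait *class* remove_pointer<T>,
// which is never derived from Value, so this is false for every T.
template <class T>
struct MustSaveOld {
  static const bool value =
      std::is_base_of<Value, std::remove_pointer<T> >::value &&
      !std::is_base_of<Constant, std::remove_pointer<T> >::value;
};

// New spelling: 'typename ...::type' names the pointee, so the test works.
template <class T>
struct MustSaveNew {
  static const bool value =
      std::is_base_of<Value, typename std::remove_pointer<T>::type>::value &&
      !std::is_base_of<Constant, typename std::remove_pointer<T>::type>::value;
};

static_assert(!MustSaveOld<Value*>::value,    "old form never asks for saving");
static_assert(MustSaveNew<Value*>::value,     "plain Value* must be saved");
static_assert(!MustSaveNew<Constant*>::value, "constants need no saving");

int main() { return 0; }

The header diff above also drops the old ConditionalCleanup base class and its cond slot in favor of the cleanup's own activation flag, which is the "re-use the cleanup's activation variable" point from the commit message.
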
View File

@@ -10,16 +10,10 @@ void test1() {
}
// CHECK: define void @_Z5test1v()
// CHECK: [[FREEVAR:%.*]] = alloca i1
// CHECK-NEXT: [[EXNOBJVAR:%.*]] = alloca i8*
// CHECK-NEXT: store i1 false, i1* [[FREEVAR]]
// CHECK-NEXT: [[EXNOBJ:%.*]] = call i8* @__cxa_allocate_exception(i64 8)
// CHECK-NEXT: store i8* [[EXNOBJ]], i8** [[EXNOBJVAR]]
// CHECK-NEXT: store i1 true, i1* [[FREEVAR]]
// CHECK: [[EXNOBJ:%.*]] = call i8* @__cxa_allocate_exception(i64 8)
// CHECK-NEXT: [[EXN:%.*]] = bitcast i8* [[EXNOBJ]] to [[DSTAR:%[^*]*\*]]
// CHECK-NEXT: [[EXN2:%.*]] = bitcast [[DSTAR]] [[EXN]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[EXN2]], i8* bitcast ([[DSTAR]] @d1 to i8*), i64 8, i32 8, i1 false)
// CHECK-NEXT: store i1 false, i1* [[FREEVAR]]
// CHECK-NEXT: call void @__cxa_throw(i8* [[EXNOBJ]], i8* bitcast (%0* @_ZTI7test1_D to i8*), i8* null) noreturn
// CHECK-NEXT: unreachable
@@ -36,20 +30,14 @@ void test2() {
}
// CHECK: define void @_Z5test2v()
// CHECK: [[FREEVAR:%.*]] = alloca i1
// CHECK-NEXT: [[EXNOBJVAR:%.*]] = alloca i8*
// CHECK-NEXT: [[EXNSLOTVAR:%.*]] = alloca i8*
// CHECK: [[EXNSLOTVAR:%.*]] = alloca i8*
// CHECK-NEXT: [[CLEANUPDESTVAR:%.*]] = alloca i32
// CHECK-NEXT: store i1 false, i1* [[FREEVAR]]
// CHECK-NEXT: [[EXNOBJ:%.*]] = call i8* @__cxa_allocate_exception(i64 16)
// CHECK-NEXT: store i8* [[EXNOBJ]], i8** [[EXNOBJVAR]]
// CHECK-NEXT: store i1 true, i1* [[FREEVAR]]
// CHECK-NEXT: [[EXN:%.*]] = bitcast i8* [[EXNOBJ]] to [[DSTAR:%[^*]*\*]]
// CHECK-NEXT: invoke void @_ZN7test2_DC1ERKS_([[DSTAR]] [[EXN]], [[DSTAR]] @d2)
// CHECK-NEXT: to label %[[CONT:.*]] unwind label %{{.*}}
// : [[CONT]]: (can't check this in Release-Asserts builds)
// CHECK: store i1 false, i1* [[FREEVAR]]
// CHECK-NEXT: call void @__cxa_throw(i8* [[EXNOBJ]], i8* bitcast (%{{.*}}* @_ZTI7test2_D to i8*), i8* null) noreturn
// CHECK: call void @__cxa_throw(i8* [[EXNOBJ]], i8* bitcast (%{{.*}}* @_ZTI7test2_D to i8*), i8* null) noreturn
// CHECK-NEXT: unreachable
@@ -64,15 +52,9 @@ void test3() {
}
// CHECK: define void @_Z5test3v()
// CHECK: [[FREEVAR:%.*]] = alloca i1
// CHECK-NEXT: [[EXNOBJVAR:%.*]] = alloca i8*
// CHECK-NEXT: store i1 false, i1* [[FREEVAR]]
// CHECK-NEXT: [[EXNOBJ:%.*]] = call i8* @__cxa_allocate_exception(i64 8)
// CHECK-NEXT: store i8* [[EXNOBJ]], i8** [[EXNOBJVAR]]
// CHECK-NEXT: store i1 true, i1* [[FREEVAR]]
// CHECK-NEXT: [[EXN:%.*]] = bitcast i8* [[EXNOBJ]] to [[DSS:%[^*]*\*]]*
// CHECK-NEXT: store [[DSS]] null, [[DSS]]* [[EXN]]
// CHECK-NEXT: store i1 false, i1* [[FREEVAR]]
// CHECK: [[EXNOBJ:%.*]] = call i8* @__cxa_allocate_exception(i64 8)
// CHECK-NEXT: [[EXN:%.*]] = bitcast i8* [[EXNOBJ]] to [[D:%[^*]+]]**
// CHECK-NEXT: store [[D]]* null, [[D]]** [[EXN]]
// CHECK-NEXT: call void @__cxa_throw(i8* [[EXNOBJ]], i8* bitcast (%1* @_ZTIPV7test3_D to i8*), i8* null) noreturn
// CHECK-NEXT: unreachable
@@ -121,20 +103,14 @@ namespace test6 {
namespace test7 {
// CHECK: define i32 @_ZN5test73fooEv()
int foo() {
// CHECK: [[FREEEXNOBJ:%.*]] = alloca i1
// CHECK-NEXT: [[EXNALLOCVAR:%.*]] = alloca i8*
// CHECK-NEXT: [[CAUGHTEXNVAR:%.*]] = alloca i8*
// CHECK: [[CAUGHTEXNVAR:%.*]] = alloca i8*
// CHECK-NEXT: [[INTCATCHVAR:%.*]] = alloca i32
// CHECK-NEXT: [[EHCLEANUPDESTVAR:%.*]] = alloca i32
// CHECK-NEXT: store i1 false, i1* [[FREEEXNOBJ]]
try {
try {
// CHECK-NEXT: [[EXNALLOC:%.*]] = call i8* @__cxa_allocate_exception
// CHECK-NEXT: store i8* [[EXNALLOC]], i8** [[EXNALLOCVAR]]
// CHECK-NEXT: store i1 true, i1* [[FREEEXNOBJ]]
// CHECK-NEXT: bitcast i8* [[EXNALLOC]] to i32*
// CHECK-NEXT: store i32 1, i32*
// CHECK-NEXT: store i1 false, i1* [[FREEEXNOBJ]]
// CHECK-NEXT: invoke void @__cxa_throw(i8* [[EXNALLOC]], i8* bitcast (i8** @_ZTIi to i8*), i8* null
throw 1;
}
@@ -414,3 +390,49 @@ namespace test15 {
// CHECK: call void @_ZN6test151AD1Ev
}
}
namespace test16 {
struct A { A(); ~A(); };
struct B { int x; B(const A &); ~B(); };
void foo();
bool cond();
// CHECK: define void @_ZN6test163barEv()
void bar() {
// CHECK: [[EXN_SAVE:%.*]] = alloca i8*
// CHECK-NEXT: [[EXN_ACTIVE:%.*]] = alloca i1
// CHECK-NEXT: [[TEMP:%.*]] = alloca [[A:%.*]],
// CHECK-NEXT: [[EXNSLOT:%.*]] = alloca i8*
// CHECK-NEXT: [[EHDEST:%.*]] = alloca i32
// CHECK-NEXT: [[TEMP_ACTIVE:%.*]] = alloca i1
cond() ? throw B(A()) : foo();
// CHECK-NEXT: [[COND:%.*]] = call zeroext i1 @_ZN6test164condEv()
// CHECK-NEXT: store i1 false, i1* [[EXN_ACTIVE]]
// CHECK-NEXT: store i1 false, i1* [[TEMP_ACTIVE]]
// CHECK-NEXT: br i1 [[COND]],
// CHECK: [[EXN:%.*]] = call i8* @__cxa_allocate_exception(i64 4)
// CHECK-NEXT: store i8* [[EXN]], i8** [[EXN_SAVE]]
// CHECK-NEXT: store i1 true, i1* [[EXN_ACTIVE]]
// CHECK-NEXT: [[T0:%.*]] = bitcast i8* [[EXN]] to [[B:%.*]]*
// CHECK-NEXT: invoke void @_ZN6test161AC1Ev([[A]]* [[TEMP]])
// CHECK: store i1 true, i1* [[TEMP_ACTIVE]]
// CHECK-NEXT: invoke void @_ZN6test161BC1ERKNS_1AE([[B]]* [[T0]], [[A]]* [[TEMP]])
// CHECK: store i1 false, i1* [[EXN_ACTIVE]]
// CHECK-NEXT: invoke void @__cxa_throw(i8* [[EXN]],
// CHECK: invoke void @_ZN6test163fooEv()
// CHECK: br label
// CHECK: invoke void @_ZN6test161AD1Ev([[A]]* [[TEMP]])
// CHECK: ret void
// CHECK: [[T0:%.*]] = load i1* [[EXN_ACTIVE]]
// CHECK-NEXT: br i1 [[T0]]
// CHECK: [[T1:%.*]] = load i8** [[EXN_SAVE]]
// CHECK-NEXT: call void @__cxa_free_exception(i8* [[T1]])
// CHECK-NEXT: br label
}
}